Diffstat (limited to 'drivers')
-rw-r--r--  drivers/accel/amdxdna/aie2_pci.c | 13
-rw-r--r--  drivers/acpi/acpi_mrrm.c | 4
-rw-r--r--  drivers/acpi/acpica/utnonansi.c | 2
-rw-r--r--  drivers/acpi/apei/einj-core.c | 2
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 761
-rw-r--r--  drivers/char/ipmi/ipmi_si.h | 10
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 116
-rw-r--r--  drivers/char/ipmi/ipmi_si_parisc.c | 2
-rw-r--r--  drivers/char/ipmi/ipmi_si_pci.c | 52
-rw-r--r--  drivers/char/ipmi/ipmi_si_platform.c | 27
-rw-r--r--  drivers/char/ipmi/ipmi_ssif.c | 6
-rw-r--r--  drivers/char/ipmi/ipmi_watchdog.c | 92
-rw-r--r--  drivers/clk/Kconfig | 1
-rw-r--r--  drivers/clk/Makefile | 1
-rw-r--r--  drivers/clk/bcm/clk-kona.c | 18
-rw-r--r--  drivers/clk/bcm/clk-kona.h | 2
-rw-r--r--  drivers/clk/bcm/clk-raspberrypi.c | 3
-rw-r--r--  drivers/clk/davinci/pll.c | 26
-rw-r--r--  drivers/clk/meson/Kconfig | 16
-rw-r--r--  drivers/clk/meson/g12a.c | 1
-rw-r--r--  drivers/clk/qcom/apcs-sdx55.c | 6
-rw-r--r--  drivers/clk/qcom/camcc-sa8775p.c | 103
-rw-r--r--  drivers/clk/qcom/camcc-sm6350.c | 18
-rw-r--r--  drivers/clk/qcom/clk-rpmh.c | 11
-rw-r--r--  drivers/clk/qcom/dispcc-sm6350.c | 3
-rw-r--r--  drivers/clk/qcom/gcc-msm8939.c | 4
-rw-r--r--  drivers/clk/qcom/gcc-sm6350.c | 6
-rw-r--r--  drivers/clk/qcom/gcc-sm8650.c | 2
-rw-r--r--  drivers/clk/qcom/gcc-sm8750.c | 3
-rw-r--r--  drivers/clk/qcom/gcc-x1e80100.c | 4
-rw-r--r--  drivers/clk/qcom/gpucc-sm6350.c | 6
-rw-r--r--  drivers/clk/renesas/Kconfig | 5
-rw-r--r--  drivers/clk/renesas/Makefile | 1
-rw-r--r--  drivers/clk/renesas/r9a09g047-cpg.c | 52
-rw-r--r--  drivers/clk/renesas/r9a09g056-cpg.c | 152
-rw-r--r--  drivers/clk/renesas/r9a09g057-cpg.c | 36
-rw-r--r--  drivers/clk/renesas/renesas-cpg-mssr.c | 3
-rw-r--r--  drivers/clk/renesas/rzg2l-cpg.c | 3
-rw-r--r--  drivers/clk/renesas/rzv2h-cpg.c | 186
-rw-r--r--  drivers/clk/renesas/rzv2h-cpg.h | 94
-rw-r--r--  drivers/clk/rockchip/Makefile | 1
-rw-r--r--  drivers/clk/rockchip/clk-gate-grf.c | 105
-rw-r--r--  drivers/clk/rockchip/clk-mmc-phase.c | 24
-rw-r--r--  drivers/clk/rockchip/clk-pll.c | 11
-rw-r--r--  drivers/clk/rockchip/clk-rk3036.c | 11
-rw-r--r--  drivers/clk/rockchip/clk-rk3288.c | 2
-rw-r--r--  drivers/clk/rockchip/clk-rk3328.c | 6
-rw-r--r--  drivers/clk/rockchip/clk-rk3528.c | 83
-rw-r--r--  drivers/clk/rockchip/clk-rk3568.c | 3
-rw-r--r--  drivers/clk/rockchip/clk-rk3576.c | 60
-rw-r--r--  drivers/clk/rockchip/clk-rk3588.c | 1
-rw-r--r--  drivers/clk/rockchip/clk-rv1126.c | 2
-rw-r--r--  drivers/clk/rockchip/clk.c | 38
-rw-r--r--  drivers/clk/rockchip/clk.h | 75
-rw-r--r--  drivers/clk/samsung/clk-exynos4.c | 74
-rw-r--r--  drivers/clk/samsung/clk-exynosautov920.c | 338
-rw-r--r--  drivers/clk/socfpga/clk-pll-s10.c | 6
-rw-r--r--  drivers/clk/socfpga/clk-pll.c | 4
-rw-r--r--  drivers/clk/sophgo/Kconfig | 19
-rw-r--r--  drivers/clk/sophgo/Makefile | 2
-rw-r--r--  drivers/clk/sophgo/clk-cv1800.c | 2
-rw-r--r--  drivers/clk/sophgo/clk-sg2044-pll.c | 628
-rw-r--r--  drivers/clk/sophgo/clk-sg2044.c | 1812
-rw-r--r--  drivers/clk/spacemit/Kconfig | 18
-rw-r--r--  drivers/clk/spacemit/Makefile | 5
-rw-r--r--  drivers/clk/spacemit/ccu-k1.c | 1164
-rw-r--r--  drivers/clk/spacemit/ccu_common.h | 48
-rw-r--r--  drivers/clk/spacemit/ccu_ddn.c | 83
-rw-r--r--  drivers/clk/spacemit/ccu_ddn.h | 48
-rw-r--r--  drivers/clk/spacemit/ccu_mix.c | 268
-rw-r--r--  drivers/clk/spacemit/ccu_mix.h | 218
-rw-r--r--  drivers/clk/spacemit/ccu_pll.c | 157
-rw-r--r--  drivers/clk/spacemit/ccu_pll.h | 86
-rw-r--r--  drivers/clk/sunxi-ng/Kconfig | 48
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun50i-h616.c | 1
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-de2.c | 25
-rw-r--r--  drivers/clk/sunxi/Kconfig | 10
-rw-r--r--  drivers/clk/thead/clk-th1520-ap.c | 196
-rw-r--r--  drivers/cpufreq/Kconfig | 12
-rw-r--r--  drivers/cpufreq/Makefile | 1
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/rcpufreq_dt.rs | 226
-rw-r--r--  drivers/cpufreq/scmi-cpufreq.c | 36
-rw-r--r--  drivers/cpuidle/cpuidle-psci.c | 9
-rw-r--r--  drivers/dma/idxd/init.c | 41
-rw-r--r--  drivers/firmware/efi/Kconfig | 24
-rw-r--r--  drivers/firmware/efi/libstub/Makefile.zboot | 4
-rw-r--r--  drivers/firmware/efi/libstub/efi-stub-helper.c | 1
-rw-r--r--  drivers/firmware/efi/libstub/zboot-header.S | 32
-rw-r--r--  drivers/firmware/efi/libstub/zboot.lds | 11
-rw-r--r--  drivers/firmware/efi/memmap.c | 3
-rw-r--r--  drivers/firmware/efi/test/efi_test.c | 4
-rw-r--r--  drivers/i2c/algos/i2c-algo-pcf.c | 3
-rw-r--r--  drivers/i2c/busses/Kconfig | 13
-rw-r--r--  drivers/i2c/busses/Makefile | 1
-rw-r--r--  drivers/i2c/busses/i2c-at91-master.c | 3
-rw-r--r--  drivers/i2c/busses/i2c-bcm-iproc.c | 223
-rw-r--r--  drivers/i2c/busses/i2c-davinci.c | 3
-rw-r--r--  drivers/i2c/busses/i2c-designware-amdisp.c | 205
-rw-r--r--  drivers/i2c/busses/i2c-designware-common.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-designware-platdrv.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-designware-slave.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-i801.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-imx.c | 12
-rw-r--r--  drivers/i2c/busses/i2c-ismt.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-lpc2k.c | 7
-rw-r--r--  drivers/i2c/busses/i2c-microchip-corei2c.c | 102
-rw-r--r--  drivers/i2c/busses/i2c-mlxbf.c | 87
-rw-r--r--  drivers/i2c/busses/i2c-npcm7xx.c | 18
-rw-r--r--  drivers/i2c/busses/i2c-octeon-core.c | 166
-rw-r--r--  drivers/i2c/busses/i2c-octeon-core.h | 13
-rw-r--r--  drivers/i2c/busses/i2c-pasemi-core.c | 119
-rw-r--r--  drivers/i2c/busses/i2c-pasemi-pci.c | 10
-rw-r--r--  drivers/i2c/busses/i2c-piix4.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-powermac.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-qcom-geni.c | 19
-rw-r--r--  drivers/i2c/busses/i2c-riic.c | 53
-rw-r--r--  drivers/i2c/busses/i2c-rzv2m.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-sh_mobile.c | 3
-rw-r--r--  drivers/i2c/busses/i2c-tegra.c | 5
-rw-r--r--  drivers/i2c/busses/i2c-thunderx-pcidrv.c | 5
-rw-r--r--  drivers/i2c/busses/i2c-tiny-usb.c | 3
-rw-r--r--  drivers/i2c/busses/i2c-uniphier-f.c | 24
-rw-r--r--  drivers/i2c/busses/i2c-uniphier.c | 24
-rw-r--r--  drivers/i2c/busses/i2c-via.c | 15
-rw-r--r--  drivers/i2c/busses/i2c-viai2c-wmt.c | 20
-rw-r--r--  drivers/i2c/busses/i2c-viapro.c | 33
-rw-r--r--  drivers/i2c/busses/i2c-viperboard.c | 17
-rw-r--r--  drivers/i2c/busses/i2c-virtio.c | 7
-rw-r--r--  drivers/i2c/busses/i2c-xgene-slimpro.c | 57
-rw-r--r--  drivers/i2c/busses/i2c-xiic.c | 4
-rw-r--r--  drivers/i2c/busses/scx200_acb.c | 6
-rw-r--r--  drivers/i2c/i2c-atr.c | 570
-rw-r--r--  drivers/i2c/i2c-core-base.c | 67
-rw-r--r--  drivers/i2c/i2c-core-of.c | 1
-rw-r--r--  drivers/i2c/i2c-core-slave.c | 12
-rw-r--r--  drivers/i2c/i2c-core-smbus.c | 3
-rw-r--r--  drivers/i2c/i2c-smbus.c | 21
-rw-r--r--  drivers/i2c/muxes/i2c-mux-ltc4306.c | 10
-rw-r--r--  drivers/infiniband/core/cm.c | 78
-rw-r--r--  drivers/infiniband/core/cm_trace.h | 2
-rw-r--r--  drivers/infiniband/core/cma.c | 25
-rw-r--r--  drivers/infiniband/core/cma_trace.h | 2
-rw-r--r--  drivers/infiniband/core/iwcm.c | 29
-rw-r--r--  drivers/infiniband/core/mad_rmpp.c | 2
-rw-r--r--  drivers/infiniband/core/umem_odp.c | 271
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 2
-rw-r--r--  drivers/infiniband/core/verbs.c | 2
-rw-r--r--  drivers/infiniband/hw/bnxt_re/debugfs.c | 20
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.c | 2
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_sp.c | 7
-rw-r--r--  drivers/infiniband/hw/hfi1/mad.h | 1
-rw-r--r--  drivers/infiniband/hw/hfi1/pio.c | 10
-rw-r--r--  drivers/infiniband/hw/hfi1/pio.h | 1
-rw-r--r--  drivers/infiniband/hw/hfi1/sdma.c | 18
-rw-r--r--  drivers/infiniband/hw/hfi1/sdma.h | 1
-rw-r--r--  drivers/infiniband/hw/hfi1/user_exp_rcv.c | 2
-rw-r--r--  drivers/infiniband/hw/hns/Makefile | 1
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_ah.c | 1
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_device.h | 20
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 26
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 1
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_main.c | 1
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_mr.c | 3
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_restrack.c | 1
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_trace.h | 216
-rw-r--r--  drivers/infiniband/hw/irdma/ctrl.c | 2
-rw-r--r--  drivers/infiniband/hw/irdma/pble.c | 2
-rw-r--r--  drivers/infiniband/hw/mana/cq.c | 4
-rw-r--r--  drivers/infiniband/hw/mana/device.c | 174
-rw-r--r--  drivers/infiniband/hw/mana/main.c | 92
-rw-r--r--  drivers/infiniband/hw/mana/mana_ib.h | 7
-rw-r--r--  drivers/infiniband/hw/mana/mr.c | 29
-rw-r--r--  drivers/infiniband/hw/mana/qp.c | 5
-rw-r--r--  drivers/infiniband/hw/mlx4/mcg.c | 8
-rw-r--r--  drivers/infiniband/hw/mlx5/fs.c | 58
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 29
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h | 13
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 6
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c | 65
-rw-r--r--  drivers/infiniband/hw/mlx5/qpc.c | 30
-rw-r--r--  drivers/infiniband/hw/mlx5/umr.c | 12
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mr.c | 2
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_uiom.c | 2
-rw-r--r--  drivers/infiniband/sw/rxe/Kconfig | 2
-rw-r--r--  drivers/infiniband/sw/rxe/rxe.c | 2
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_loc.h | 29
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_mr.c | 66
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_odp.c | 144
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_param.h | 5
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_qp.c | 7
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_resp.c | 15
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_task.c | 40
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_task.h | 2
-rw-r--r--  drivers/infiniband/sw/siw/siw.h | 2
-rw-r--r--  drivers/infiniband/sw/siw/siw_cq.c | 2
-rw-r--r--  drivers/infiniband/sw/siw/siw_mem.c | 28
-rw-r--r--  drivers/infiniband/sw/siw/siw_mem.h | 1
-rw-r--r--  drivers/infiniband/sw/siw/siw_qp_rx.c | 8
-rw-r--r--  drivers/infiniband/sw/siw/siw_verbs.c | 8
-rw-r--r--  drivers/iommu/Kconfig | 158
-rw-r--r--  drivers/iommu/Makefile | 6
-rw-r--r--  drivers/iommu/amd/Makefile | 2
-rw-r--r--  drivers/iommu/amd/amd_iommu.h | 2
-rw-r--r--  drivers/iommu/amd/amd_iommu_types.h | 10
-rw-r--r--  drivers/iommu/amd/init.c | 94
-rw-r--r--  drivers/iommu/amd/io_pgtable.c | 38
-rw-r--r--  drivers/iommu/amd/io_pgtable_v2.c | 12
-rw-r--r--  drivers/iommu/amd/iommu.c | 94
-rw-r--r--  drivers/iommu/amd/ppr.c | 2
-rw-r--r--  drivers/iommu/apple-dart.c | 3
-rw-r--r--  drivers/iommu/arm/Kconfig | 144
-rw-r--r--  drivers/iommu/arm/Makefile | 3
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/Makefile | 2
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c | 86
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 138
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 39
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c | 9
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c | 44
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu.c | 6
-rw-r--r--  drivers/iommu/dma-iommu.c | 11
-rw-r--r--  drivers/iommu/exynos-iommu.c | 12
-rw-r--r--  drivers/iommu/fsl_pamu_domain.c | 2
-rw-r--r--  drivers/iommu/intel/Makefile | 7
-rw-r--r--  drivers/iommu/intel/dmar.c | 14
-rw-r--r--  drivers/iommu/intel/iommu.c | 244
-rw-r--r--  drivers/iommu/intel/iommu.h | 62
-rw-r--r--  drivers/iommu/intel/irq_remapping.c | 12
-rw-r--r--  drivers/iommu/intel/nested.c | 20
-rw-r--r--  drivers/iommu/intel/pasid.c | 13
-rw-r--r--  drivers/iommu/intel/pasid.h | 1
-rw-r--r--  drivers/iommu/intel/prq.c | 7
-rw-r--r--  drivers/iommu/intel/svm.c | 9
-rw-r--r--  drivers/iommu/io-pgtable-arm.c | 58
-rw-r--r--  drivers/iommu/io-pgtable-dart.c | 23
-rw-r--r--  drivers/iommu/iommu-pages.c | 119
-rw-r--r--  drivers/iommu/iommu-pages.h | 195
-rw-r--r--  drivers/iommu/iommu-sva.c | 18
-rw-r--r--  drivers/iommu/iommu.c | 107
-rw-r--r--  drivers/iommu/iommufd/device.c | 59
-rw-r--r--  drivers/iommu/iommufd/eventq.c | 48
-rw-r--r--  drivers/iommu/iommufd/iommufd_private.h | 6
-rw-r--r--  drivers/iommu/iommufd/selftest.c | 57
-rw-r--r--  drivers/iommu/ipmmu-vmsa.c | 3
-rw-r--r--  drivers/iommu/mtk_iommu.c | 37
-rw-r--r--  drivers/iommu/riscv/Makefile | 2
-rw-r--r--  drivers/iommu/riscv/iommu.c | 43
-rw-r--r--  drivers/iommu/rockchip-iommu.c | 14
-rw-r--r--  drivers/iommu/s390-iommu.c | 345
-rw-r--r--  drivers/iommu/sun50i-iommu.c | 6
-rw-r--r--  drivers/iommu/tegra-smmu.c | 111
-rw-r--r--  drivers/iommu/virtio-iommu.c | 187
-rw-r--r--  drivers/media/i2c/ds90ub913.c | 9
-rw-r--r--  drivers/media/i2c/ds90ub953.c | 9
-rw-r--r--  drivers/media/i2c/ds90ub960.c | 54
-rw-r--r--  drivers/misc/Kconfig | 12
-rw-r--r--  drivers/misc/Makefile | 1
-rw-r--r--  drivers/misc/ti_fpc202.c | 438
-rw-r--r--  drivers/misc/uacce/uacce.c | 40
-rw-r--r--  drivers/mmc/host/sdhci-msm.c | 16
-rw-r--r--  drivers/net/ethernet/microsoft/mana/gdma_main.c | 27
-rw-r--r--  drivers/net/ethernet/microsoft/mana/hw_channel.c | 19
-rw-r--r--  drivers/net/ethernet/microsoft/mana/mana_en.c | 108
-rw-r--r--  drivers/pinctrl/Kconfig | 4
-rw-r--r--  drivers/pinctrl/actions/pinctrl-owl.c | 8
-rw-r--r--  drivers/pinctrl/bcm/pinctrl-bcm2835.c | 9
-rw-r--r--  drivers/pinctrl/bcm/pinctrl-iproc-gpio.c | 6
-rw-r--r--  drivers/pinctrl/bcm/pinctrl-nsp-gpio.c | 6
-rw-r--r--  drivers/pinctrl/core.c | 29
-rw-r--r--  drivers/pinctrl/freescale/Kconfig | 11
-rw-r--r--  drivers/pinctrl/freescale/pinctrl-imx-scmi.c | 4
-rw-r--r--  drivers/pinctrl/mediatek/Kconfig | 22
-rw-r--r--  drivers/pinctrl/mediatek/Makefile | 2
-rw-r--r--  drivers/pinctrl/mediatek/mtk-eint.c | 30
-rw-r--r--  drivers/pinctrl/mediatek/mtk-eint.h | 7
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-airoha.c | 19
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-moore.c | 18
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mt6893.c | 879
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mt8196.c | 1860
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c | 9
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mtk-common.c | 15
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mtk-mt6893.h | 2283
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mtk-mt8196.h | 3085
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-paris.c | 29
-rw-r--r--  drivers/pinctrl/meson/Kconfig | 24
-rw-r--r--  drivers/pinctrl/meson/pinctrl-amlogic-a4.c | 22
-rw-r--r--  drivers/pinctrl/meson/pinctrl-meson.c | 6
-rw-r--r--  drivers/pinctrl/mvebu/pinctrl-armada-37xx.c | 43
-rw-r--r--  drivers/pinctrl/nomadik/Kconfig | 6
-rw-r--r--  drivers/pinctrl/nomadik/pinctrl-abx500.c | 12
-rw-r--r--  drivers/pinctrl/pinconf.h | 17
-rw-r--r--  drivers/pinctrl/pinctrl-amd.c | 7
-rw-r--r--  drivers/pinctrl/pinctrl-apple-gpio.c | 30
-rw-r--r--  drivers/pinctrl/pinctrl-at91-pio4.c | 18
-rw-r--r--  drivers/pinctrl/pinctrl-at91.c | 21
-rw-r--r--  drivers/pinctrl/pinctrl-axp209.c | 35
-rw-r--r--  drivers/pinctrl/pinctrl-cy8c95x0.c | 17
-rw-r--r--  drivers/pinctrl/pinctrl-ingenic.c | 8
-rw-r--r--  drivers/pinctrl/pinctrl-mcp23s08.c | 8
-rw-r--r--  drivers/pinctrl/pinctrl-microchip-sgpio.c | 8
-rw-r--r--  drivers/pinctrl/pinctrl-ocelot.c | 17
-rw-r--r--  drivers/pinctrl/pinctrl-pistachio.c | 8
-rw-r--r--  drivers/pinctrl/pinctrl-rk805.c | 26
-rw-r--r--  drivers/pinctrl/pinctrl-scmi.c | 1
-rw-r--r--  drivers/pinctrl/pinctrl-stmfx.c | 13
-rw-r--r--  drivers/pinctrl/pinctrl-sx150x.c | 23
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-lpass-lpi.c | 6
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-msm.c | 6
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-qcm2290.c | 70
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-qcs615.c | 2
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-qcs8300.c | 2
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-spmi-gpio.c | 6
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-spmi-mpp.c | 6
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c | 7
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c | 7
-rw-r--r--  drivers/pinctrl/qcom/tlmm-test.c | 1
-rw-r--r--  drivers/pinctrl/renesas/Kconfig | 1
-rw-r--r--  drivers/pinctrl/renesas/pinctrl-rzg2l.c | 299
-rw-r--r--  drivers/pinctrl/samsung/pinctrl-exynos-arm64.c | 52
-rw-r--r--  drivers/pinctrl/samsung/pinctrl-exynos.c | 294
-rw-r--r--  drivers/pinctrl/samsung/pinctrl-exynos.h | 28
-rw-r--r--  drivers/pinctrl/samsung/pinctrl-samsung.c | 34
-rw-r--r--  drivers/pinctrl/samsung/pinctrl-samsung.h | 8
-rw-r--r--  drivers/pinctrl/spacemit/pinctrl-k1.c | 10
-rw-r--r--  drivers/pinctrl/stm32/pinctrl-stm32.c | 7
-rw-r--r--  drivers/pinctrl/uniphier/Kconfig | 2
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c | 14
-rw-r--r--  drivers/scsi/dc395x.c | 697
-rw-r--r--  drivers/scsi/elx/libefc_sli/sli4.c | 6
-rw-r--r--  drivers/scsi/fnic/fip.c | 8
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas.h | 51
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c | 81
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 2
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 6
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 259
-rw-r--r--  drivers/scsi/isci/remote_device.c | 30
-rw-r--r--  drivers/scsi/isci/remote_device.h | 15
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 136
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 38
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c | 10
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 30
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c | 4
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_os.c | 73
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_ctl.c | 3
-rw-r--r--  drivers/scsi/mvsas/mv_64xx.h | 4
-rw-r--r--  drivers/scsi/pm8001/pm8001_ctl.c | 2
-rw-r--r--  drivers/scsi/qedi/qedi_dbg.c | 22
-rw-r--r--  drivers/scsi/qedi/qedi_dbg.h | 12
-rw-r--r--  drivers/scsi/qedi/qedi_gbl.h | 1
-rw-r--r--  drivers/scsi/qedi/qedi_main.c | 8
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 53
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.h | 3
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 5
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 90
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c | 50
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 12
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 129
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.h | 3
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nx.c | 5
-rw-r--r--  drivers/scsi/scsi_debug.c | 361
-rw-r--r--  drivers/scsi/scsi_devinfo.c | 27
-rw-r--r--  drivers/scsi/scsi_priv.h | 2
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 2
-rw-r--r--  drivers/scsi/sd.c | 2
-rw-r--r--  drivers/scsi/sg.c | 3
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_init.c | 140
-rw-r--r--  drivers/soc/qcom/ice.c | 350
-rw-r--r--  drivers/target/target_core_configfs.c | 20
-rw-r--r--  drivers/target/target_core_device.c | 89
-rw-r--r--  drivers/target/target_core_spc.c | 134
-rw-r--r--  drivers/target/target_core_stat.c | 69
-rw-r--r--  drivers/target/target_core_transport.c | 119
-rw-r--r--  drivers/ufs/core/ufs-mcq.c | 6
-rw-r--r--  drivers/ufs/core/ufs-sysfs.c | 133
-rw-r--r--  drivers/ufs/core/ufshcd.c | 103
-rw-r--r--  drivers/ufs/host/ufs-qcom.c | 181
-rw-r--r--  drivers/ufs/host/ufs-qcom.h | 11
-rw-r--r--  drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c | 121
-rw-r--r--  drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h | 14
-rw-r--r--  drivers/vfio/pci/mlx5/cmd.c | 371
-rw-r--r--  drivers/vfio/pci/mlx5/cmd.h | 35
-rw-r--r--  drivers/vfio/pci/mlx5/main.c | 87
-rw-r--r--  drivers/vfio/vfio_iommu_type1.c | 51
-rw-r--r--  drivers/virt/coco/Kconfig | 6
-rw-r--r--  drivers/virt/coco/Makefile | 2
-rw-r--r--  drivers/virt/coco/arm-cca-guest/arm-cca-guest.c | 8
-rw-r--r--  drivers/virt/coco/guest/Kconfig | 17
-rw-r--r--  drivers/virt/coco/guest/Makefile | 4
-rw-r--r--  drivers/virt/coco/guest/report.c (renamed from drivers/virt/coco/tsm.c) | 63
-rw-r--r--  drivers/virt/coco/guest/tsm-mr.c | 251
-rw-r--r--  drivers/virt/coco/sev-guest/sev-guest.c | 12
-rw-r--r--  drivers/virt/coco/tdx-guest/Kconfig | 1
-rw-r--r--  drivers/virt/coco/tdx-guest/tdx-guest.c | 259
396 files changed, 23314 insertions, 6214 deletions
diff --git a/drivers/accel/amdxdna/aie2_pci.c b/drivers/accel/amdxdna/aie2_pci.c
index 5a058e565b01..c6cf7068d23c 100644
--- a/drivers/accel/amdxdna/aie2_pci.c
+++ b/drivers/accel/amdxdna/aie2_pci.c
@@ -512,12 +512,6 @@ static int aie2_init(struct amdxdna_dev *xdna)
goto release_fw;
}
- ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
- if (ret) {
- XDNA_ERR(xdna, "Enable PASID failed, ret %d", ret);
- goto free_irq;
- }
-
psp_conf.fw_size = fw->size;
psp_conf.fw_buf = fw->data;
for (i = 0; i < PSP_MAX_REGS; i++)
@@ -526,14 +520,14 @@ static int aie2_init(struct amdxdna_dev *xdna)
if (!ndev->psp_hdl) {
XDNA_ERR(xdna, "failed to create psp");
ret = -ENOMEM;
- goto disable_sva;
+ goto free_irq;
}
xdna->dev_handle = ndev;
ret = aie2_hw_start(xdna);
if (ret) {
XDNA_ERR(xdna, "start npu failed, ret %d", ret);
- goto disable_sva;
+ goto free_irq;
}
ret = aie2_mgmt_fw_query(ndev);
@@ -584,8 +578,6 @@ async_event_free:
aie2_error_async_events_free(ndev);
stop_hw:
aie2_hw_stop(xdna);
-disable_sva:
- iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
free_irq:
pci_free_irq_vectors(pdev);
release_fw:
@@ -601,7 +593,6 @@ static void aie2_fini(struct amdxdna_dev *xdna)
aie2_hw_stop(xdna);
aie2_error_async_events_free(ndev);
- iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
pci_free_irq_vectors(pdev);
}
diff --git a/drivers/acpi/acpi_mrrm.c b/drivers/acpi/acpi_mrrm.c
index 26c1a4e6b6ec..47ea3ccc2142 100644
--- a/drivers/acpi/acpi_mrrm.c
+++ b/drivers/acpi/acpi_mrrm.c
@@ -157,8 +157,10 @@ static __init int add_boot_memory_ranges(void)
for (int i = 0; i < mrrm_mem_entry_num; i++) {
name = kasprintf(GFP_KERNEL, "range%d", i);
- if (!name)
+ if (!name) {
+ ret = -ENOMEM;
break;
+ }
kobj = kobject_create_and_add(name, pkobj);
diff --git a/drivers/acpi/acpica/utnonansi.c b/drivers/acpi/acpica/utnonansi.c
index 803e3e893825..ff0802ace19b 100644
--- a/drivers/acpi/acpica/utnonansi.c
+++ b/drivers/acpi/acpica/utnonansi.c
@@ -168,7 +168,7 @@ void acpi_ut_safe_strncpy(char *dest, char *source, acpi_size dest_size)
{
/* Always terminate destination string */
- memcpy(dest, source, dest_size);
+ strncpy(dest, source, dest_size);
dest[dest_size - 1] = 0;
}
diff --git a/drivers/acpi/apei/einj-core.c b/drivers/acpi/apei/einj-core.c
index ca3484dac5c4..fea11a35eea3 100644
--- a/drivers/acpi/apei/einj-core.c
+++ b/drivers/acpi/apei/einj-core.c
@@ -766,7 +766,7 @@ static int __init einj_probe(struct faux_device *fdev)
rc = einj_get_available_error_type(&available_error_type);
if (rc)
- return rc;
+ goto err_put_table;
rc = -ENOMEM;
einj_debug_dir = debugfs_create_dir("einj", apei_get_debugfs_dir());
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 3ba9d7e9a6c7..064944ae9fdc 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -27,7 +27,6 @@
#include <linux/ipmi_smi.h>
#include <linux/notifier.h>
#include <linux/init.h>
-#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
@@ -41,11 +40,12 @@
static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);
-static void smi_recv_work(struct work_struct *t);
+static void smi_work(struct work_struct *t);
static void handle_new_recv_msgs(struct ipmi_smi *intf);
static void need_waiter(struct ipmi_smi *intf);
static int handle_one_recv_msg(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg);
+static void intf_free(struct kref *ref);
static bool initialized;
static bool drvregistered;
@@ -180,14 +180,8 @@ MODULE_PARM_DESC(max_msgs_per_user,
struct ipmi_user {
struct list_head link;
- /*
- * Set to NULL when the user is destroyed, a pointer to myself
- * so srcu_dereference can be used on it.
- */
- struct ipmi_user *self;
- struct srcu_struct release_barrier;
-
struct kref refcount;
+ refcount_t destroyed;
/* The upper layer that handles receive messages. */
const struct ipmi_user_hndl *handler;
@@ -200,30 +194,8 @@ struct ipmi_user {
bool gets_events;
atomic_t nr_msgs;
-
- /* Free must run in process context for RCU cleanup. */
- struct work_struct remove_work;
};
-static struct workqueue_struct *remove_work_wq;
-
-static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
- __acquires(user->release_barrier)
-{
- struct ipmi_user *ruser;
-
- *index = srcu_read_lock(&user->release_barrier);
- ruser = srcu_dereference(user->self, &user->release_barrier);
- if (!ruser)
- srcu_read_unlock(&user->release_barrier, *index);
- return ruser;
-}
-
-static void release_ipmi_user(struct ipmi_user *user, int index)
-{
- srcu_read_unlock(&user->release_barrier, index);
-}
-
struct cmd_rcvr {
struct list_head link;
@@ -327,6 +299,8 @@ struct bmc_device {
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
+static struct workqueue_struct *bmc_remove_work_wq;
+
static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
struct ipmi_device_id *id,
bool *guid_set, guid_t *guid);
@@ -451,11 +425,10 @@ struct ipmi_smi {
struct list_head link;
/*
- * The list of upper layers that are using me. seq_lock write
- * protects this. Read protection is with srcu.
+ * The list of upper layers that are using me.
*/
struct list_head users;
- struct srcu_struct users_srcu;
+ struct mutex users_mutex;
atomic_t nr_users;
struct device_attribute nr_users_devattr;
struct device_attribute nr_msgs_devattr;
@@ -496,15 +469,22 @@ struct ipmi_smi {
int curr_seq;
/*
- * Messages queued for delivery. If delivery fails (out of memory
- * for instance), They will stay in here to be processed later in a
- * periodic timer interrupt. The workqueue is for handling received
- * messages directly from the handler.
+	 * Messages queued for delivery to the user.
+ */
+ struct mutex user_msgs_mutex;
+ struct list_head user_msgs;
+
+ /*
+ * Messages queued for processing. If processing fails (out
+	 * of memory for instance), they will stay in here to be
+ * processed later in a periodic timer interrupt. The
+ * workqueue is for handling received messages directly from
+ * the handler.
*/
spinlock_t waiting_rcv_msgs_lock;
struct list_head waiting_rcv_msgs;
atomic_t watchdog_pretimeouts_to_deliver;
- struct work_struct recv_work;
+ struct work_struct smi_work;
spinlock_t xmit_msgs_lock;
struct list_head xmit_msgs;
@@ -522,10 +502,9 @@ struct ipmi_smi {
* Events that were queues because no one was there to receive
* them.
*/
- spinlock_t events_lock; /* For dealing with event stuff. */
+ struct mutex events_mutex; /* For dealing with event stuff. */
struct list_head waiting_events;
unsigned int waiting_events_count; /* How many events in queue? */
- char delivering_events;
char event_msg_printed;
/* How many users are waiting for events? */
@@ -613,6 +592,28 @@ static int __ipmi_bmc_register(struct ipmi_smi *intf,
bool guid_set, guid_t *guid, int intf_num);
static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);
+static void free_ipmi_user(struct kref *ref)
+{
+ struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
+ struct module *owner;
+
+ owner = user->intf->owner;
+ kref_put(&user->intf->refcount, intf_free);
+ module_put(owner);
+ vfree(user);
+}
+
+static void release_ipmi_user(struct ipmi_user *user)
+{
+ kref_put(&user->refcount, free_ipmi_user);
+}
+
+static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user)
+{
+ if (!kref_get_unless_zero(&user->refcount))
+ return NULL;
+ return user;
+}
/*
* The driver model view of the IPMI messaging driver.
@@ -630,9 +631,6 @@ static DEFINE_MUTEX(ipmidriver_mutex);
static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);
-#define ipmi_interfaces_mutex_held() \
- lockdep_is_held(&ipmi_interfaces_mutex)
-static struct srcu_struct ipmi_interfaces_srcu;
/*
* List of watchers that want to know when smi's are added and deleted.
@@ -698,27 +696,20 @@ static void free_smi_msg_list(struct list_head *q)
}
}
-static void clean_up_interface_data(struct ipmi_smi *intf)
+static void intf_free(struct kref *ref)
{
+ struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);
int i;
struct cmd_rcvr *rcvr, *rcvr2;
- struct list_head list;
-
- cancel_work_sync(&intf->recv_work);
free_smi_msg_list(&intf->waiting_rcv_msgs);
free_recv_msg_list(&intf->waiting_events);
/*
* Wholesale remove all the entries from the list in the
- * interface and wait for RCU to know that none are in use.
+ * interface. No need for locks, this is single-threaded.
*/
- mutex_lock(&intf->cmd_rcvrs_mutex);
- INIT_LIST_HEAD(&list);
- list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
- mutex_unlock(&intf->cmd_rcvrs_mutex);
-
- list_for_each_entry_safe(rcvr, rcvr2, &list, link)
+ list_for_each_entry_safe(rcvr, rcvr2, &intf->cmd_rcvrs, link)
kfree(rcvr);
for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
@@ -726,20 +717,17 @@ static void clean_up_interface_data(struct ipmi_smi *intf)
&& (intf->seq_table[i].recv_msg))
ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
}
-}
-
-static void intf_free(struct kref *ref)
-{
- struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);
- clean_up_interface_data(intf);
kfree(intf);
}
int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
struct ipmi_smi *intf;
- int index, rv;
+ unsigned int count = 0, i;
+ int *interfaces = NULL;
+ struct device **devices = NULL;
+ int rv = 0;
/*
* Make sure the driver is actually initialized, this handles
@@ -753,20 +741,53 @@ int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
list_add(&watcher->link, &smi_watchers);
- index = srcu_read_lock(&ipmi_interfaces_srcu);
- list_for_each_entry_rcu(intf, &ipmi_interfaces, link,
- lockdep_is_held(&smi_watchers_mutex)) {
- int intf_num = READ_ONCE(intf->intf_num);
+ /*
+ * Build an array of ipmi interfaces and fill it in, and
+ * another array of the devices. We can't call the callback
+ * with ipmi_interfaces_mutex held. smi_watchers_mutex will
+ * keep things in order for the user.
+ */
+ mutex_lock(&ipmi_interfaces_mutex);
+ list_for_each_entry(intf, &ipmi_interfaces, link)
+ count++;
+ if (count > 0) {
+ interfaces = kmalloc_array(count, sizeof(*interfaces),
+ GFP_KERNEL);
+ if (!interfaces) {
+ rv = -ENOMEM;
+ } else {
+ devices = kmalloc_array(count, sizeof(*devices),
+ GFP_KERNEL);
+ if (!devices) {
+ kfree(interfaces);
+ interfaces = NULL;
+ rv = -ENOMEM;
+ }
+ }
+ count = 0;
+ }
+ if (interfaces) {
+ list_for_each_entry(intf, &ipmi_interfaces, link) {
+ int intf_num = READ_ONCE(intf->intf_num);
- if (intf_num == -1)
- continue;
- watcher->new_smi(intf_num, intf->si_dev);
+ if (intf_num == -1)
+ continue;
+ devices[count] = intf->si_dev;
+ interfaces[count++] = intf_num;
+ }
+ }
+ mutex_unlock(&ipmi_interfaces_mutex);
+
+ if (interfaces) {
+ for (i = 0; i < count; i++)
+ watcher->new_smi(interfaces[i], devices[i]);
+ kfree(interfaces);
+ kfree(devices);
}
- srcu_read_unlock(&ipmi_interfaces_srcu, index);
mutex_unlock(&smi_watchers_mutex);
- return 0;
+ return rv;
}
EXPORT_SYMBOL(ipmi_smi_watcher_register);
@@ -779,22 +800,17 @@ int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
}
EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
-/*
- * Must be called with smi_watchers_mutex held.
- */
static void
call_smi_watchers(int i, struct device *dev)
{
struct ipmi_smi_watcher *w;
- mutex_lock(&smi_watchers_mutex);
list_for_each_entry(w, &smi_watchers, link) {
if (try_module_get(w->owner)) {
w->new_smi(i, dev);
module_put(w->owner);
}
}
- mutex_unlock(&smi_watchers_mutex);
}
static int
@@ -941,18 +957,14 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
ipmi_free_recv_msg(msg);
atomic_dec(&msg->user->nr_msgs);
} else {
- int index;
- struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);
-
- if (user) {
- atomic_dec(&user->nr_msgs);
- user->handler->ipmi_recv_hndl(msg, user->handler_data);
- release_ipmi_user(user, index);
- } else {
- /* User went away, give up. */
- ipmi_free_recv_msg(msg);
- rv = -EINVAL;
- }
+ /*
+ * Deliver it in smi_work. The message will hold a
+ * refcount to the user.
+ */
+ mutex_lock(&intf->user_msgs_mutex);
+ list_add_tail(&msg->link, &intf->user_msgs);
+ mutex_unlock(&intf->user_msgs_mutex);
+ queue_work(system_wq, &intf->smi_work);
}
return rv;
@@ -1192,23 +1204,14 @@ static int intf_err_seq(struct ipmi_smi *intf,
return rv;
}
-static void free_user_work(struct work_struct *work)
-{
- struct ipmi_user *user = container_of(work, struct ipmi_user,
- remove_work);
-
- cleanup_srcu_struct(&user->release_barrier);
- vfree(user);
-}
-
int ipmi_create_user(unsigned int if_num,
const struct ipmi_user_hndl *handler,
void *handler_data,
struct ipmi_user **user)
{
unsigned long flags;
- struct ipmi_user *new_user;
- int rv, index;
+ struct ipmi_user *new_user = NULL;
+ int rv = 0;
struct ipmi_smi *intf;
/*
@@ -1230,30 +1233,31 @@ int ipmi_create_user(unsigned int if_num,
if (rv)
return rv;
- new_user = vzalloc(sizeof(*new_user));
- if (!new_user)
- return -ENOMEM;
-
- index = srcu_read_lock(&ipmi_interfaces_srcu);
- list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ mutex_lock(&ipmi_interfaces_mutex);
+ list_for_each_entry(intf, &ipmi_interfaces, link) {
if (intf->intf_num == if_num)
goto found;
}
/* Not found, return an error */
rv = -EINVAL;
- goto out_kfree;
+ goto out_unlock;
found:
+ if (intf->in_shutdown) {
+ rv = -ENODEV;
+ goto out_unlock;
+ }
+
if (atomic_add_return(1, &intf->nr_users) > max_users) {
rv = -EBUSY;
goto out_kfree;
}
- INIT_WORK(&new_user->remove_work, free_user_work);
-
- rv = init_srcu_struct(&new_user->release_barrier);
- if (rv)
+ new_user = vzalloc(sizeof(*new_user));
+ if (!new_user) {
+ rv = -ENOMEM;
goto out_kfree;
+ }
if (!try_module_get(intf->owner)) {
rv = -ENODEV;
@@ -1265,64 +1269,58 @@ int ipmi_create_user(unsigned int if_num,
atomic_set(&new_user->nr_msgs, 0);
kref_init(&new_user->refcount);
+ refcount_set(&new_user->destroyed, 1);
+ kref_get(&new_user->refcount); /* Destroy owns a refcount. */
new_user->handler = handler;
new_user->handler_data = handler_data;
new_user->intf = intf;
new_user->gets_events = false;
- rcu_assign_pointer(new_user->self, new_user);
+ mutex_lock(&intf->users_mutex);
spin_lock_irqsave(&intf->seq_lock, flags);
- list_add_rcu(&new_user->link, &intf->users);
+ list_add(&new_user->link, &intf->users);
spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->users_mutex);
+
if (handler->ipmi_watchdog_pretimeout)
/* User wants pretimeouts, so make sure to watch for them. */
smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
- srcu_read_unlock(&ipmi_interfaces_srcu, index);
- *user = new_user;
- return 0;
out_kfree:
- atomic_dec(&intf->nr_users);
- srcu_read_unlock(&ipmi_interfaces_srcu, index);
- vfree(new_user);
+ if (rv) {
+ atomic_dec(&intf->nr_users);
+ vfree(new_user);
+ } else {
+ *user = new_user;
+ }
+out_unlock:
+ mutex_unlock(&ipmi_interfaces_mutex);
return rv;
}
EXPORT_SYMBOL(ipmi_create_user);
int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
{
- int rv, index;
+ int rv = -EINVAL;
struct ipmi_smi *intf;
- index = srcu_read_lock(&ipmi_interfaces_srcu);
- list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
- if (intf->intf_num == if_num)
- goto found;
+ mutex_lock(&ipmi_interfaces_mutex);
+ list_for_each_entry(intf, &ipmi_interfaces, link) {
+ if (intf->intf_num == if_num) {
+ if (!intf->handlers->get_smi_info)
+ rv = -ENOTTY;
+ else
+ rv = intf->handlers->get_smi_info(intf->send_info, data);
+ break;
+ }
}
- srcu_read_unlock(&ipmi_interfaces_srcu, index);
-
- /* Not found, return an error */
- return -EINVAL;
-
-found:
- if (!intf->handlers->get_smi_info)
- rv = -ENOTTY;
- else
- rv = intf->handlers->get_smi_info(intf->send_info, data);
- srcu_read_unlock(&ipmi_interfaces_srcu, index);
+ mutex_unlock(&ipmi_interfaces_mutex);
return rv;
}
EXPORT_SYMBOL(ipmi_get_smi_info);
-static void free_user(struct kref *ref)
-{
- struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
-
- /* SRCU cleanup must happen in workqueue context. */
- queue_work(remove_work_wq, &user->remove_work);
-}
-
+/* Must be called with intf->users_mutex held. */
static void _ipmi_destroy_user(struct ipmi_user *user)
{
struct ipmi_smi *intf = user->intf;
@@ -1330,21 +1328,10 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
unsigned long flags;
struct cmd_rcvr *rcvr;
struct cmd_rcvr *rcvrs = NULL;
- struct module *owner;
+ struct ipmi_recv_msg *msg, *msg2;
- if (!acquire_ipmi_user(user, &i)) {
- /*
- * The user has already been cleaned up, just make sure
- * nothing is using it and return.
- */
- synchronize_srcu(&user->release_barrier);
+ if (!refcount_dec_if_one(&user->destroyed))
return;
- }
-
- rcu_assign_pointer(user->self, NULL);
- release_ipmi_user(user, i);
-
- synchronize_srcu(&user->release_barrier);
if (user->handler->shutdown)
user->handler->shutdown(user->handler_data);
@@ -1355,11 +1342,11 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
if (user->gets_events)
atomic_dec(&intf->event_waiters);
- /* Remove the user from the interface's sequence table. */
- spin_lock_irqsave(&intf->seq_lock, flags);
- list_del_rcu(&user->link);
+ /* Remove the user from the interface's list and sequence table. */
+ list_del(&user->link);
atomic_dec(&intf->nr_users);
+ spin_lock_irqsave(&intf->seq_lock, flags);
for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
if (intf->seq_table[i].inuse
&& (intf->seq_table[i].recv_msg->user == user)) {
@@ -1374,7 +1361,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
* Remove the user from the command receiver's table. First
* we build a list of everything (not using the standard link,
* since other things may be using it till we do
- * synchronize_srcu()) then free everything in that list.
+ * synchronize_rcu()) then free everything in that list.
*/
mutex_lock(&intf->cmd_rcvrs_mutex);
list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
@@ -1386,23 +1373,33 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
}
}
mutex_unlock(&intf->cmd_rcvrs_mutex);
- synchronize_rcu();
while (rcvrs) {
rcvr = rcvrs;
rcvrs = rcvr->next;
kfree(rcvr);
}
- owner = intf->owner;
- kref_put(&intf->refcount, intf_free);
- module_put(owner);
+ mutex_lock(&intf->user_msgs_mutex);
+ list_for_each_entry_safe(msg, msg2, &intf->user_msgs, link) {
+ if (msg->user != user)
+ continue;
+ list_del(&msg->link);
+ ipmi_free_recv_msg(msg);
+ }
+ mutex_unlock(&intf->user_msgs_mutex);
+
+ release_ipmi_user(user);
}
void ipmi_destroy_user(struct ipmi_user *user)
{
+ struct ipmi_smi *intf = user->intf;
+
+ mutex_lock(&intf->users_mutex);
_ipmi_destroy_user(user);
+ mutex_unlock(&intf->users_mutex);
- kref_put(&user->refcount, free_user);
+ kref_put(&user->refcount, free_ipmi_user);
}
EXPORT_SYMBOL(ipmi_destroy_user);
@@ -1411,9 +1408,9 @@ int ipmi_get_version(struct ipmi_user *user,
unsigned char *minor)
{
struct ipmi_device_id id;
- int rv, index;
+ int rv;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1422,7 +1419,7 @@ int ipmi_get_version(struct ipmi_user *user,
*major = ipmi_version_major(&id);
*minor = ipmi_version_minor(&id);
}
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1432,9 +1429,9 @@ int ipmi_set_my_address(struct ipmi_user *user,
unsigned int channel,
unsigned char address)
{
- int index, rv = 0;
+ int rv = 0;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1444,7 +1441,7 @@ int ipmi_set_my_address(struct ipmi_user *user,
channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
user->intf->addrinfo[channel].address = address;
}
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1454,9 +1451,9 @@ int ipmi_get_my_address(struct ipmi_user *user,
unsigned int channel,
unsigned char *address)
{
- int index, rv = 0;
+ int rv = 0;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1466,7 +1463,7 @@ int ipmi_get_my_address(struct ipmi_user *user,
channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
*address = user->intf->addrinfo[channel].address;
}
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1476,9 +1473,9 @@ int ipmi_set_my_LUN(struct ipmi_user *user,
unsigned int channel,
unsigned char LUN)
{
- int index, rv = 0;
+ int rv = 0;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1488,7 +1485,7 @@ int ipmi_set_my_LUN(struct ipmi_user *user,
channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
user->intf->addrinfo[channel].lun = LUN & 0x3;
}
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1498,9 +1495,9 @@ int ipmi_get_my_LUN(struct ipmi_user *user,
unsigned int channel,
unsigned char *address)
{
- int index, rv = 0;
+ int rv = 0;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1510,7 +1507,7 @@ int ipmi_get_my_LUN(struct ipmi_user *user,
channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
*address = user->intf->addrinfo[channel].lun;
}
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1518,17 +1515,17 @@ EXPORT_SYMBOL(ipmi_get_my_LUN);
int ipmi_get_maintenance_mode(struct ipmi_user *user)
{
- int mode, index;
+ int mode;
unsigned long flags;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
mode = user->intf->maintenance_mode;
spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return mode;
}
@@ -1543,11 +1540,11 @@ static void maintenance_mode_update(struct ipmi_smi *intf)
int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
{
- int rv = 0, index;
+ int rv = 0;
unsigned long flags;
struct ipmi_smi *intf = user->intf;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1577,7 +1574,7 @@ int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
}
out_unlock:
spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1585,19 +1582,17 @@ EXPORT_SYMBOL(ipmi_set_maintenance_mode);
int ipmi_set_gets_events(struct ipmi_user *user, bool val)
{
- unsigned long flags;
struct ipmi_smi *intf = user->intf;
struct ipmi_recv_msg *msg, *msg2;
struct list_head msgs;
- int index;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
INIT_LIST_HEAD(&msgs);
- spin_lock_irqsave(&intf->events_lock, flags);
+ mutex_lock(&intf->events_mutex);
if (user->gets_events == val)
goto out;
@@ -1610,13 +1605,6 @@ int ipmi_set_gets_events(struct ipmi_user *user, bool val)
atomic_dec(&intf->event_waiters);
}
- if (intf->delivering_events)
- /*
- * Another thread is delivering events for this, so
- * let it handle any new events.
- */
- goto out;
-
/* Deliver any queued events. */
while (user->gets_events && !list_empty(&intf->waiting_events)) {
list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
@@ -1627,22 +1615,16 @@ int ipmi_set_gets_events(struct ipmi_user *user, bool val)
intf->event_msg_printed = 0;
}
- intf->delivering_events = 1;
- spin_unlock_irqrestore(&intf->events_lock, flags);
-
list_for_each_entry_safe(msg, msg2, &msgs, link) {
msg->user = user;
kref_get(&user->refcount);
deliver_local_response(intf, msg);
}
-
- spin_lock_irqsave(&intf->events_lock, flags);
- intf->delivering_events = 0;
}
out:
- spin_unlock_irqrestore(&intf->events_lock, flags);
- release_ipmi_user(user, index);
+ mutex_unlock(&intf->events_mutex);
+ release_ipmi_user(user);
return 0;
}
@@ -1687,9 +1669,9 @@ int ipmi_register_for_cmd(struct ipmi_user *user,
{
struct ipmi_smi *intf = user->intf;
struct cmd_rcvr *rcvr;
- int rv = 0, index;
+ int rv = 0;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1719,7 +1701,7 @@ out_unlock:
if (rv)
kfree(rcvr);
out_release:
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1733,9 +1715,9 @@ int ipmi_unregister_for_cmd(struct ipmi_user *user,
struct ipmi_smi *intf = user->intf;
struct cmd_rcvr *rcvr;
struct cmd_rcvr *rcvrs = NULL;
- int i, rv = -ENOENT, index;
+ int i, rv = -ENOENT;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1758,7 +1740,7 @@ int ipmi_unregister_for_cmd(struct ipmi_user *user,
}
mutex_unlock(&intf->cmd_rcvrs_mutex);
synchronize_rcu();
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
while (rcvrs) {
smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
rcvr = rcvrs;
@@ -1882,13 +1864,12 @@ static void smi_send(struct ipmi_smi *intf,
const struct ipmi_smi_handlers *handlers,
struct ipmi_smi_msg *smi_msg, int priority)
{
- int run_to_completion = intf->run_to_completion;
+ int run_to_completion = READ_ONCE(intf->run_to_completion);
unsigned long flags = 0;
if (!run_to_completion)
spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
smi_msg = smi_add_send_msg(intf, smi_msg, priority);
-
if (!run_to_completion)
spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
@@ -2304,6 +2285,7 @@ static int i_ipmi_request(struct ipmi_user *user,
{
struct ipmi_smi_msg *smi_msg;
struct ipmi_recv_msg *recv_msg;
+ int run_to_completion = READ_ONCE(intf->run_to_completion);
int rv = 0;
if (user) {
@@ -2337,7 +2319,8 @@ static int i_ipmi_request(struct ipmi_user *user,
}
}
- rcu_read_lock();
+ if (!run_to_completion)
+ mutex_lock(&intf->users_mutex);
if (intf->in_shutdown) {
rv = -ENODEV;
goto out_err;
@@ -2383,7 +2366,8 @@ out_err:
smi_send(intf, intf->handlers, smi_msg, priority);
}
- rcu_read_unlock();
+ if (!run_to_completion)
+ mutex_unlock(&intf->users_mutex);
out:
if (rv && user)
@@ -2414,12 +2398,12 @@ int ipmi_request_settime(struct ipmi_user *user,
unsigned int retry_time_ms)
{
unsigned char saddr = 0, lun = 0;
- int rv, index;
+ int rv;
if (!user)
return -EINVAL;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -2438,7 +2422,7 @@ int ipmi_request_settime(struct ipmi_user *user,
retries,
retry_time_ms);
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
EXPORT_SYMBOL(ipmi_request_settime);
@@ -2453,12 +2437,12 @@ int ipmi_request_supply_msgs(struct ipmi_user *user,
int priority)
{
unsigned char saddr = 0, lun = 0;
- int rv, index;
+ int rv;
if (!user)
return -EINVAL;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -2477,7 +2461,7 @@ int ipmi_request_supply_msgs(struct ipmi_user *user,
lun,
-1, 0);
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
EXPORT_SYMBOL(ipmi_request_supply_msgs);
@@ -3064,7 +3048,7 @@ cleanup_bmc_device(struct kref *ref)
* with removing the device attributes while reading a device
* attribute.
*/
- queue_work(remove_work_wq, &bmc->remove_work);
+ queue_work(bmc_remove_work_wq, &bmc->remove_work);
}
/*
@@ -3520,15 +3504,14 @@ static ssize_t nr_msgs_show(struct device *dev,
char *buf)
{
struct ipmi_smi *intf = container_of(attr,
- struct ipmi_smi, nr_msgs_devattr);
+ struct ipmi_smi, nr_msgs_devattr);
struct ipmi_user *user;
- int index;
unsigned int count = 0;
- index = srcu_read_lock(&intf->users_srcu);
- list_for_each_entry_rcu(user, &intf->users, link)
+ mutex_lock(&intf->users_mutex);
+ list_for_each_entry(user, &intf->users, link)
count += atomic_read(&user->nr_msgs);
- srcu_read_unlock(&intf->users_srcu, index);
+ mutex_unlock(&intf->users_mutex);
return sysfs_emit(buf, "%u\n", count);
}
@@ -3569,12 +3552,6 @@ int ipmi_add_smi(struct module *owner,
if (!intf)
return -ENOMEM;
- rv = init_srcu_struct(&intf->users_srcu);
- if (rv) {
- kfree(intf);
- return rv;
- }
-
intf->owner = owner;
intf->bmc = &intf->tmp_bmc;
INIT_LIST_HEAD(&intf->bmc->intfs);
@@ -3591,7 +3568,10 @@ int ipmi_add_smi(struct module *owner,
}
if (slave_addr != 0)
intf->addrinfo[0].address = slave_addr;
+ INIT_LIST_HEAD(&intf->user_msgs);
+ mutex_init(&intf->user_msgs_mutex);
INIT_LIST_HEAD(&intf->users);
+ mutex_init(&intf->users_mutex);
atomic_set(&intf->nr_users, 0);
intf->handlers = handlers;
intf->send_info = send_info;
@@ -3603,12 +3583,12 @@ int ipmi_add_smi(struct module *owner,
intf->curr_seq = 0;
spin_lock_init(&intf->waiting_rcv_msgs_lock);
INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
- INIT_WORK(&intf->recv_work, smi_recv_work);
+ INIT_WORK(&intf->smi_work, smi_work);
atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
spin_lock_init(&intf->xmit_msgs_lock);
INIT_LIST_HEAD(&intf->xmit_msgs);
INIT_LIST_HEAD(&intf->hp_xmit_msgs);
- spin_lock_init(&intf->events_lock);
+ mutex_init(&intf->events_mutex);
spin_lock_init(&intf->watch_lock);
atomic_set(&intf->event_waiters, 0);
intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
@@ -3621,12 +3601,16 @@ int ipmi_add_smi(struct module *owner,
for (i = 0; i < IPMI_NUM_STATS; i++)
atomic_set(&intf->stats[i], 0);
+ /*
+ * Grab the watchers mutex so we can deliver the new interface
+ * without races.
+ */
+ mutex_lock(&smi_watchers_mutex);
mutex_lock(&ipmi_interfaces_mutex);
/* Look for a hole in the numbers. */
i = 0;
link = &ipmi_interfaces;
- list_for_each_entry_rcu(tintf, &ipmi_interfaces, link,
- ipmi_interfaces_mutex_held()) {
+ list_for_each_entry(tintf, &ipmi_interfaces, link) {
if (tintf->intf_num != i) {
link = &tintf->link;
break;
@@ -3635,9 +3619,9 @@ int ipmi_add_smi(struct module *owner,
}
/* Add the new interface in numeric order. */
if (i == 0)
- list_add_rcu(&intf->link, &ipmi_interfaces);
+ list_add(&intf->link, &ipmi_interfaces);
else
- list_add_tail_rcu(&intf->link, link);
+ list_add_tail(&intf->link, link);
rv = handlers->start_processing(send_info, intf);
if (rv)
@@ -3669,18 +3653,14 @@ int ipmi_add_smi(struct module *owner,
goto out_err_bmc_reg;
}
- /*
- * Keep memory order straight for RCU readers. Make
- * sure everything else is committed to memory before
- * setting intf_num to mark the interface valid.
- */
- smp_wmb();
intf->intf_num = i;
mutex_unlock(&ipmi_interfaces_mutex);
/* After this point the interface is legal to use. */
call_smi_watchers(i, intf->si_dev);
+ mutex_unlock(&smi_watchers_mutex);
+
return 0;
out_err_bmc_reg:
@@ -3689,10 +3669,9 @@ int ipmi_add_smi(struct module *owner,
if (intf->handlers->shutdown)
intf->handlers->shutdown(intf->send_info);
out_err:
- list_del_rcu(&intf->link);
+ list_del(&intf->link);
mutex_unlock(&ipmi_interfaces_mutex);
- synchronize_srcu(&ipmi_interfaces_srcu);
- cleanup_srcu_struct(&intf->users_srcu);
+ mutex_unlock(&smi_watchers_mutex);
kref_put(&intf->refcount, intf_free);
return rv;
@@ -3758,19 +3737,28 @@ static void cleanup_smi_msgs(struct ipmi_smi *intf)
void ipmi_unregister_smi(struct ipmi_smi *intf)
{
struct ipmi_smi_watcher *w;
- int intf_num, index;
+ int intf_num;
if (!intf)
return;
+
intf_num = intf->intf_num;
mutex_lock(&ipmi_interfaces_mutex);
+ cancel_work_sync(&intf->smi_work);
+ /* smi_work() can no longer be in progress after this. */
+
intf->intf_num = -1;
intf->in_shutdown = true;
- list_del_rcu(&intf->link);
+ list_del(&intf->link);
mutex_unlock(&ipmi_interfaces_mutex);
- synchronize_srcu(&ipmi_interfaces_srcu);
- /* At this point no users can be added to the interface. */
+ /*
+ * At this point no users can be added to the interface and no
+ * new messages can be sent.
+ */
+
+ if (intf->handlers->shutdown)
+ intf->handlers->shutdown(intf->send_info);
device_remove_file(intf->si_dev, &intf->nr_msgs_devattr);
device_remove_file(intf->si_dev, &intf->nr_users_devattr);
@@ -3784,24 +3772,19 @@ void ipmi_unregister_smi(struct ipmi_smi *intf)
w->smi_gone(intf_num);
mutex_unlock(&smi_watchers_mutex);
- index = srcu_read_lock(&intf->users_srcu);
+ mutex_lock(&intf->users_mutex);
while (!list_empty(&intf->users)) {
- struct ipmi_user *user =
- container_of(list_next_rcu(&intf->users),
- struct ipmi_user, link);
+ struct ipmi_user *user = list_first_entry(&intf->users,
+ struct ipmi_user, link);
_ipmi_destroy_user(user);
}
- srcu_read_unlock(&intf->users_srcu, index);
-
- if (intf->handlers->shutdown)
- intf->handlers->shutdown(intf->send_info);
+ mutex_unlock(&intf->users_mutex);
cleanup_smi_msgs(intf);
ipmi_bmc_unregister(intf);
- cleanup_srcu_struct(&intf->users_srcu);
kref_put(&intf->refcount, intf_free);
}
EXPORT_SYMBOL(ipmi_unregister_smi);
@@ -3926,17 +3909,12 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
dev_dbg(intf->si_dev, "Invalid command: %*ph\n",
msg->data_size, msg->data);
- rcu_read_lock();
- if (!intf->in_shutdown) {
- smi_send(intf, intf->handlers, msg, 0);
- /*
- * We used the message, so return the value
- * that causes it to not be freed or
- * queued.
- */
- rv = -1;
- }
- rcu_read_unlock();
+ smi_send(intf, intf->handlers, msg, 0);
+ /*
+ * We used the message, so return the value that
+ * causes it to not be freed or queued.
+ */
+ rv = -1;
} else {
recv_msg = ipmi_alloc_recv_msg();
if (!recv_msg) {
@@ -3946,7 +3924,7 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
* later.
*/
rv = 1;
- kref_put(&user->refcount, free_user);
+ kref_put(&user->refcount, free_ipmi_user);
} else {
/* Extract the source address from the data. */
ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
@@ -4017,17 +3995,12 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE;
msg->data_size = 5;
- rcu_read_lock();
- if (!intf->in_shutdown) {
- smi_send(intf, intf->handlers, msg, 0);
- /*
- * We used the message, so return the value
- * that causes it to not be freed or
- * queued.
- */
- rv = -1;
- }
- rcu_read_unlock();
+ smi_send(intf, intf->handlers, msg, 0);
+ /*
+ * We used the message, so return the value that
+ * causes it to not be freed or queued.
+ */
+ rv = -1;
} else {
recv_msg = ipmi_alloc_recv_msg();
if (!recv_msg) {
@@ -4037,7 +4010,7 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
* later.
*/
rv = 1;
- kref_put(&user->refcount, free_user);
+ kref_put(&user->refcount, free_ipmi_user);
} else {
/* Extract the source address from the data. */
daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
@@ -4206,14 +4179,33 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
rcu_read_unlock();
if (user == NULL) {
- /* We didn't find a user, just give up. */
+ /* We didn't find a user, just give up and return an error. */
ipmi_inc_stat(intf, unhandled_commands);
+ msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg->data[1] = IPMI_SEND_MSG_CMD;
+ msg->data[2] = chan;
+ msg->data[3] = msg->rsp[4]; /* handle */
+ msg->data[4] = msg->rsp[8]; /* rsSWID */
+ msg->data[5] = ((netfn + 1) << 2) | (msg->rsp[9] & 0x3);
+ msg->data[6] = ipmb_checksum(&msg->data[3], 3);
+ msg->data[7] = msg->rsp[5]; /* rqSWID */
+ /* rqseq/lun */
+ msg->data[8] = (msg->rsp[9] & 0xfc) | (msg->rsp[6] & 0x3);
+ msg->data[9] = cmd;
+ msg->data[10] = IPMI_INVALID_CMD_COMPLETION_CODE;
+ msg->data[11] = ipmb_checksum(&msg->data[7], 4);
+ msg->data_size = 12;
+
+ dev_dbg(intf->si_dev, "Invalid command: %*ph\n",
+ msg->data_size, msg->data);
+
+ smi_send(intf, intf->handlers, msg, 0);
/*
- * Don't do anything with these messages, just allow
- * them to be freed.
+ * We used the message, so return the value that
+ * causes it to not be freed or queued.
*/
- rv = 0;
+ rv = -1;
} else {
recv_msg = ipmi_alloc_recv_msg();
if (!recv_msg) {
@@ -4222,7 +4214,7 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
* message, so requeue it for handling later.
*/
rv = 1;
- kref_put(&user->refcount, free_user);
+ kref_put(&user->refcount, free_ipmi_user);
} else {
/* Extract the source address from the data. */
lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
@@ -4331,7 +4323,7 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
* later.
*/
rv = 1;
- kref_put(&user->refcount, free_user);
+ kref_put(&user->refcount, free_ipmi_user);
} else {
/*
* OEM Messages are expected to be delivered via
@@ -4393,8 +4385,7 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
struct ipmi_recv_msg *recv_msg, *recv_msg2;
struct list_head msgs;
struct ipmi_user *user;
- int rv = 0, deliver_count = 0, index;
- unsigned long flags;
+ int rv = 0, deliver_count = 0;
if (msg->rsp_size < 19) {
/* Message is too small to be an IPMB event. */
@@ -4409,7 +4400,7 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
INIT_LIST_HEAD(&msgs);
- spin_lock_irqsave(&intf->events_lock, flags);
+ mutex_lock(&intf->events_mutex);
ipmi_inc_stat(intf, events);
@@ -4417,18 +4408,20 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
* Allocate and fill in one message for every user that is
* getting events.
*/
- index = srcu_read_lock(&intf->users_srcu);
- list_for_each_entry_rcu(user, &intf->users, link) {
+ mutex_lock(&intf->users_mutex);
+ list_for_each_entry(user, &intf->users, link) {
if (!user->gets_events)
continue;
recv_msg = ipmi_alloc_recv_msg();
if (!recv_msg) {
- rcu_read_unlock();
+ mutex_unlock(&intf->users_mutex);
list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
link) {
+ user = recv_msg->user;
list_del(&recv_msg->link);
ipmi_free_recv_msg(recv_msg);
+ kref_put(&user->refcount, free_ipmi_user);
}
/*
* We couldn't allocate memory for the
@@ -4446,7 +4439,7 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
kref_get(&user->refcount);
list_add_tail(&recv_msg->link, &msgs);
}
- srcu_read_unlock(&intf->users_srcu, index);
+ mutex_unlock(&intf->users_mutex);
if (deliver_count) {
/* Now deliver all the messages. */
@@ -4484,7 +4477,7 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
}
out:
- spin_unlock_irqrestore(&intf->events_lock, flags);
+ mutex_unlock(&intf->events_mutex);
return rv;
}
@@ -4570,7 +4563,7 @@ return_unspecified:
&& (msg->data[1] == IPMI_SEND_MSG_CMD)
&& (msg->user_data == NULL)) {
- if (intf->in_shutdown)
+ if (intf->in_shutdown || intf->run_to_completion)
goto out;
/*
@@ -4642,6 +4635,9 @@ return_unspecified:
*/
struct ipmi_recv_msg *recv_msg;
+ if (intf->run_to_completion)
+ goto out;
+
chan = msg->data[2] & 0x0f;
if (chan >= IPMI_MAX_CHANNELS)
/* Invalid channel number */
@@ -4664,6 +4660,9 @@ process_response_response:
&& (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
struct ipmi_channel *chans;
+ if (intf->run_to_completion)
+ goto out;
+
/* It's from the receive queue. */
chan = msg->rsp[3] & 0xf;
if (chan >= IPMI_MAX_CHANNELS) {
@@ -4738,6 +4737,9 @@ process_response_response:
} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
&& (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
/* It's an asynchronous event. */
+ if (intf->run_to_completion)
+ goto out;
+
requeue = handle_read_event_rsp(intf, msg);
} else {
/* It's a response from the local BMC. */
@@ -4753,10 +4755,10 @@ process_response_response:
*/
static void handle_new_recv_msgs(struct ipmi_smi *intf)
{
- struct ipmi_smi_msg *smi_msg;
- unsigned long flags = 0;
- int rv;
- int run_to_completion = intf->run_to_completion;
+ struct ipmi_smi_msg *smi_msg;
+ unsigned long flags = 0;
+ int rv;
+ int run_to_completion = READ_ONCE(intf->run_to_completion);
/* See if any waiting messages need to be processed. */
if (!run_to_completion)
@@ -4790,31 +4792,15 @@ static void handle_new_recv_msgs(struct ipmi_smi *intf)
}
if (!run_to_completion)
spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
-
- /*
- * If the pretimout count is non-zero, decrement one from it and
- * deliver pretimeouts to all the users.
- */
- if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
- struct ipmi_user *user;
- int index;
-
- index = srcu_read_lock(&intf->users_srcu);
- list_for_each_entry_rcu(user, &intf->users, link) {
- if (user->handler->ipmi_watchdog_pretimeout)
- user->handler->ipmi_watchdog_pretimeout(
- user->handler_data);
- }
- srcu_read_unlock(&intf->users_srcu, index);
- }
}
-static void smi_recv_work(struct work_struct *t)
+static void smi_work(struct work_struct *t)
{
unsigned long flags = 0; /* keep us warning-free. */
- struct ipmi_smi *intf = from_work(intf, t, recv_work);
- int run_to_completion = intf->run_to_completion;
+ struct ipmi_smi *intf = from_work(intf, t, smi_work);
+ int run_to_completion = READ_ONCE(intf->run_to_completion);
struct ipmi_smi_msg *newmsg = NULL;
+ struct ipmi_recv_msg *msg, *msg2;
/*
* Start the next message if available.
@@ -4824,8 +4810,6 @@ static void smi_recv_work(struct work_struct *t)
* message delivery.
*/
- rcu_read_lock();
-
if (!run_to_completion)
spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
if (intf->curr_msg == NULL && !intf->in_shutdown) {
@@ -4843,15 +4827,57 @@ static void smi_recv_work(struct work_struct *t)
intf->curr_msg = newmsg;
}
}
-
if (!run_to_completion)
spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+
if (newmsg)
intf->handlers->sender(intf->send_info, newmsg);
- rcu_read_unlock();
-
handle_new_recv_msgs(intf);
+
+ /* Nothing below applies during panic time. */
+ if (run_to_completion)
+ return;
+
+ /*
+	 * If the pretimeout count is non-zero, decrement one from it and
+ * deliver pretimeouts to all the users.
+ */
+ if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
+ struct ipmi_user *user;
+
+ mutex_lock(&intf->users_mutex);
+ list_for_each_entry(user, &intf->users, link) {
+ if (user->handler->ipmi_watchdog_pretimeout)
+ user->handler->ipmi_watchdog_pretimeout(
+ user->handler_data);
+ }
+ mutex_unlock(&intf->users_mutex);
+ }
+
+ /*
+ * Freeing the message can cause a user to be released, which
+ * can then cause the interface to be freed. Make sure that
+ * doesn't happen until we are ready.
+ */
+ kref_get(&intf->refcount);
+
+ mutex_lock(&intf->user_msgs_mutex);
+ list_for_each_entry_safe(msg, msg2, &intf->user_msgs, link) {
+ struct ipmi_user *user = msg->user;
+
+ list_del(&msg->link);
+
+ if (refcount_read(&user->destroyed) == 0) {
+ ipmi_free_recv_msg(msg);
+ } else {
+ atomic_dec(&user->nr_msgs);
+ user->handler->ipmi_recv_hndl(msg, user->handler_data);
+ }
+ }
+ mutex_unlock(&intf->user_msgs_mutex);
+
+ kref_put(&intf->refcount, intf_free);
}
/* Handle a new message from the lower layer. */
@@ -4859,7 +4885,7 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg)
{
unsigned long flags = 0; /* keep us warning-free. */
- int run_to_completion = intf->run_to_completion;
+ int run_to_completion = READ_ONCE(intf->run_to_completion);
/*
* To preserve message order, we keep a queue and deliver from
@@ -4884,9 +4910,9 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
if (run_to_completion)
- smi_recv_work(&intf->recv_work);
+ smi_work(&intf->smi_work);
else
- queue_work(system_bh_wq, &intf->recv_work);
+ queue_work(system_wq, &intf->smi_work);
}
EXPORT_SYMBOL(ipmi_smi_msg_received);
@@ -4896,7 +4922,7 @@ void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
return;
atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
- queue_work(system_bh_wq, &intf->recv_work);
+ queue_work(system_wq, &intf->smi_work);
}
EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
@@ -5065,7 +5091,7 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
flags);
}
- queue_work(system_bh_wq, &intf->recv_work);
+ queue_work(system_wq, &intf->smi_work);
return need_timer;
}
@@ -5084,17 +5110,19 @@ static struct timer_list ipmi_timer;
static atomic_t stop_operation;
-static void ipmi_timeout(struct timer_list *unused)
+static void ipmi_timeout_work(struct work_struct *work)
{
+ if (atomic_read(&stop_operation))
+ return;
+
struct ipmi_smi *intf;
bool need_timer = false;
- int index;
if (atomic_read(&stop_operation))
return;
- index = srcu_read_lock(&ipmi_interfaces_srcu);
- list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ mutex_lock(&ipmi_interfaces_mutex);
+ list_for_each_entry(intf, &ipmi_interfaces, link) {
if (atomic_read(&intf->event_waiters)) {
intf->ticks_to_req_ev--;
if (intf->ticks_to_req_ev == 0) {
@@ -5106,12 +5134,22 @@ static void ipmi_timeout(struct timer_list *unused)
need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
}
- srcu_read_unlock(&ipmi_interfaces_srcu, index);
+ mutex_unlock(&ipmi_interfaces_mutex);
if (need_timer)
mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}
+static DECLARE_WORK(ipmi_timer_work, ipmi_timeout_work);
+
+static void ipmi_timeout(struct timer_list *unused)
+{
+ if (atomic_read(&stop_operation))
+ return;
+
+ queue_work(system_wq, &ipmi_timer_work);
+}
+
static void need_waiter(struct ipmi_smi *intf)
{
/* Racy, but worst case we start the timer twice. */
@@ -5168,7 +5206,7 @@ static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
{
if (msg->user && !oops_in_progress)
- kref_put(&msg->user->refcount, free_user);
+ kref_put(&msg->user->refcount, free_ipmi_user);
msg->done(msg);
}
EXPORT_SYMBOL(ipmi_free_recv_msg);
@@ -5188,9 +5226,9 @@ static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
/*
* Inside a panic, send a message and wait for a response.
*/
-static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
- struct ipmi_addr *addr,
- struct kernel_ipmi_msg *msg)
+static void _ipmi_panic_request_and_wait(struct ipmi_smi *intf,
+ struct ipmi_addr *addr,
+ struct kernel_ipmi_msg *msg)
{
struct ipmi_smi_msg smi_msg;
struct ipmi_recv_msg recv_msg;
@@ -5220,6 +5258,15 @@ static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
ipmi_poll(intf);
}
+void ipmi_panic_request_and_wait(struct ipmi_user *user,
+ struct ipmi_addr *addr,
+ struct kernel_ipmi_msg *msg)
+{
+ user->intf->run_to_completion = 1;
+ _ipmi_panic_request_and_wait(user->intf, addr, msg);
+}
+EXPORT_SYMBOL(ipmi_panic_request_and_wait);
+
static void event_receiver_fetcher(struct ipmi_smi *intf,
struct ipmi_recv_msg *msg)
{
@@ -5288,7 +5335,7 @@ static void send_panic_events(struct ipmi_smi *intf, char *str)
}
/* Send the event announcing the panic. */
- ipmi_panic_request_and_wait(intf, &addr, &msg);
+ _ipmi_panic_request_and_wait(intf, &addr, &msg);
/*
* On every interface, dump a bunch of OEM event holding the
@@ -5324,7 +5371,7 @@ static void send_panic_events(struct ipmi_smi *intf, char *str)
msg.data = NULL;
msg.data_len = 0;
intf->null_user_handler = device_id_fetcher;
- ipmi_panic_request_and_wait(intf, &addr, &msg);
+ _ipmi_panic_request_and_wait(intf, &addr, &msg);
if (intf->local_event_generator) {
/* Request the event receiver from the local MC. */
@@ -5333,7 +5380,7 @@ static void send_panic_events(struct ipmi_smi *intf, char *str)
msg.data = NULL;
msg.data_len = 0;
intf->null_user_handler = event_receiver_fetcher;
- ipmi_panic_request_and_wait(intf, &addr, &msg);
+ _ipmi_panic_request_and_wait(intf, &addr, &msg);
}
intf->null_user_handler = NULL;
@@ -5385,7 +5432,7 @@ static void send_panic_events(struct ipmi_smi *intf, char *str)
memcpy_and_pad(data+5, 11, p, size, '\0');
p += size;
- ipmi_panic_request_and_wait(intf, &addr, &msg);
+ _ipmi_panic_request_and_wait(intf, &addr, &msg);
}
}
@@ -5403,7 +5450,7 @@ static int panic_event(struct notifier_block *this,
has_panicked = 1;
/* For every registered interface, set it to run to completion. */
- list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ list_for_each_entry(intf, &ipmi_interfaces, link) {
if (!intf->handlers || intf->intf_num == -1)
/* Interface is not ready. */
continue;
@@ -5433,7 +5480,7 @@ static int panic_event(struct notifier_block *this,
intf->handlers->set_run_to_completion(intf->send_info,
1);
- list_for_each_entry_rcu(user, &intf->users, link) {
+ list_for_each_entry(user, &intf->users, link) {
if (user->handler->ipmi_panic_handler)
user->handler->ipmi_panic_handler(
user->handler_data);
@@ -5478,15 +5525,11 @@ static int ipmi_init_msghandler(void)
if (initialized)
goto out;
- rv = init_srcu_struct(&ipmi_interfaces_srcu);
- if (rv)
- goto out;
-
- remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
- if (!remove_work_wq) {
+ bmc_remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
+ if (!bmc_remove_work_wq) {
pr_err("unable to create ipmi-msghandler-remove-wq workqueue");
rv = -ENOMEM;
- goto out_wq;
+ goto out;
}
timer_setup(&ipmi_timer, ipmi_timeout, 0);
@@ -5496,9 +5539,6 @@ static int ipmi_init_msghandler(void)
initialized = true;
-out_wq:
- if (rv)
- cleanup_srcu_struct(&ipmi_interfaces_srcu);
out:
mutex_unlock(&ipmi_interfaces_mutex);
return rv;
@@ -5522,7 +5562,7 @@ static void __exit cleanup_ipmi(void)
int count;
if (initialized) {
- destroy_workqueue(remove_work_wq);
+ destroy_workqueue(bmc_remove_work_wq);
atomic_notifier_chain_unregister(&panic_notifier_list,
&panic_block);
@@ -5539,6 +5579,7 @@ static void __exit cleanup_ipmi(void)
*/
atomic_set(&stop_operation, 1);
timer_delete_sync(&ipmi_timer);
+ cancel_work_sync(&ipmi_timer_work);
initialized = false;
@@ -5549,8 +5590,6 @@ static void __exit cleanup_ipmi(void)
count = atomic_read(&recv_msg_inuse_count);
if (count != 0)
pr_warn("recv message count %d at exit\n", count);
-
- cleanup_srcu_struct(&ipmi_interfaces_srcu);
}
if (drvregistered)
driver_unregister(&ipmidriver.driver);
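[Editor's illustrative sketch, not part of the patch: the msghandler changes above export ipmi_panic_request_and_wait(), so panic-time callers (such as the watchdog rework later in this diff) no longer juggle statically allocated message buffers and a completion counter. A minimal, hypothetical caller is sketched below; "example_panic_poke_bmc" and "my_user" are assumptions, while the address/message setup mirrors what ipmi_watchdog.c does.]

#include <linux/ipmi.h>

/*
 * Illustrative only.  "my_user" is a hypothetical ipmi_user created
 * earlier with ipmi_create_user().
 */
static void example_panic_poke_bmc(struct ipmi_user *my_user)
{
	struct ipmi_system_interface_addr addr;
	struct kernel_ipmi_msg msg;

	addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	addr.channel = IPMI_BMC_CHANNEL;
	addr.lun = 0;

	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_DEVICE_ID_CMD;
	msg.data = NULL;
	msg.data_len = 0;

	/*
	 * Puts the interface into run-to-completion mode and polls until
	 * the response arrives; only valid at panic/halt time.
	 */
	ipmi_panic_request_and_wait(my_user, (struct ipmi_addr *)&addr, &msg);
}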
diff --git a/drivers/char/ipmi/ipmi_si.h b/drivers/char/ipmi/ipmi_si.h
index a7ead2a4c753..508c3fd45877 100644
--- a/drivers/char/ipmi/ipmi_si.h
+++ b/drivers/char/ipmi/ipmi_si.h
@@ -26,6 +26,14 @@ enum si_type {
/* Array is defined in the ipmi_si_intf.c */
extern const char *const si_to_str[];
+struct ipmi_match_info {
+ enum si_type type;
+};
+
+extern const struct ipmi_match_info ipmi_kcs_si_info;
+extern const struct ipmi_match_info ipmi_smic_si_info;
+extern const struct ipmi_match_info ipmi_bt_si_info;
+
enum ipmi_addr_space {
IPMI_IO_ADDR_SPACE, IPMI_MEM_ADDR_SPACE
};
@@ -64,7 +72,7 @@ struct si_sm_io {
void (*irq_cleanup)(struct si_sm_io *io);
u8 slave_addr;
- enum si_type si_type;
+ const struct ipmi_match_info *si_info;
struct device *dev;
};
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 12b0b77eb1cc..7fe891783a37 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -73,6 +73,10 @@ enum si_intf_state {
/* 'invalid' to allow a firmware-specified interface to be disabled */
const char *const si_to_str[] = { "invalid", "kcs", "smic", "bt", NULL };
+const struct ipmi_match_info ipmi_kcs_si_info = { .type = SI_KCS };
+const struct ipmi_match_info ipmi_smic_si_info = { .type = SI_SMIC };
+const struct ipmi_match_info ipmi_bt_si_info = { .type = SI_BT };
+
static bool initialized;
/*
@@ -692,7 +696,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
break;
}
enables = current_global_enables(smi_info, 0, &irq_on);
- if (smi_info->io.si_type == SI_BT)
+ if (smi_info->io.si_info->type == SI_BT)
/* BT has its own interrupt enable bit. */
check_bt_irq(smi_info, irq_on);
if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) {
@@ -1119,7 +1123,7 @@ irqreturn_t ipmi_si_irq_handler(int irq, void *data)
struct smi_info *smi_info = data;
unsigned long flags;
- if (smi_info->io.si_type == SI_BT)
+ if (smi_info->io.si_info->type == SI_BT)
/* We need to clear the IRQ flag for the BT interface. */
smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
IPMI_BT_INTMASK_CLEAR_IRQ_BIT
@@ -1164,7 +1168,7 @@ static int smi_start_processing(void *send_info,
* The BT interface is efficient enough to not need a thread,
* and there is no need for a thread if we have interrupts.
*/
- else if ((new_smi->io.si_type != SI_BT) && (!new_smi->io.irq))
+ else if (new_smi->io.si_info->type != SI_BT && !new_smi->io.irq)
enable = 1;
if (enable) {
@@ -1235,7 +1239,7 @@ MODULE_PARM_DESC(kipmid_max_busy_us,
void ipmi_irq_finish_setup(struct si_sm_io *io)
{
- if (io->si_type == SI_BT)
+ if (io->si_info->type == SI_BT)
/* Enable the interrupt in the BT interface. */
io->outputb(io, IPMI_BT_INTMASK_REG,
IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
@@ -1243,7 +1247,7 @@ void ipmi_irq_finish_setup(struct si_sm_io *io)
void ipmi_irq_start_cleanup(struct si_sm_io *io)
{
- if (io->si_type == SI_BT)
+ if (io->si_info->type == SI_BT)
/* Disable the interrupt in the BT interface. */
io->outputb(io, IPMI_BT_INTMASK_REG, 0);
}
@@ -1614,7 +1618,7 @@ static ssize_t type_show(struct device *dev,
{
struct smi_info *smi_info = dev_get_drvdata(dev);
- return sysfs_emit(buf, "%s\n", si_to_str[smi_info->io.si_type]);
+ return sysfs_emit(buf, "%s\n", si_to_str[smi_info->io.si_info->type]);
}
static DEVICE_ATTR_RO(type);
@@ -1649,7 +1653,7 @@ static ssize_t params_show(struct device *dev,
return sysfs_emit(buf,
"%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
- si_to_str[smi_info->io.si_type],
+ si_to_str[smi_info->io.si_info->type],
addr_space_to_str[smi_info->io.addr_space],
smi_info->io.addr_data,
smi_info->io.regspacing,
@@ -1803,7 +1807,7 @@ setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
{
struct ipmi_device_id *id = &smi_info->device_id;
if (id->manufacturer_id == DELL_IANA_MFR_ID &&
- smi_info->io.si_type == SI_BT)
+ smi_info->io.si_info->type == SI_BT)
register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
}
@@ -1907,13 +1911,13 @@ int ipmi_si_add_smi(struct si_sm_io *io)
/* We prefer ACPI over SMBIOS. */
dev_info(dup->io.dev,
"Removing SMBIOS-specified %s state machine in favor of ACPI\n",
- si_to_str[new_smi->io.si_type]);
+ si_to_str[new_smi->io.si_info->type]);
cleanup_one_si(dup);
} else {
dev_info(new_smi->io.dev,
"%s-specified %s state machine: duplicate\n",
ipmi_addr_src_to_str(new_smi->io.addr_source),
- si_to_str[new_smi->io.si_type]);
+ si_to_str[new_smi->io.si_info->type]);
rv = -EBUSY;
kfree(new_smi);
goto out_err;
@@ -1922,7 +1926,7 @@ int ipmi_si_add_smi(struct si_sm_io *io)
pr_info("Adding %s-specified %s state machine\n",
ipmi_addr_src_to_str(new_smi->io.addr_source),
- si_to_str[new_smi->io.si_type]);
+ si_to_str[new_smi->io.si_info->type]);
list_add_tail(&new_smi->link, &smi_infos);
@@ -1945,12 +1949,12 @@ static int try_smi_init(struct smi_info *new_smi)
pr_info("Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
ipmi_addr_src_to_str(new_smi->io.addr_source),
- si_to_str[new_smi->io.si_type],
+ si_to_str[new_smi->io.si_info->type],
addr_space_to_str[new_smi->io.addr_space],
new_smi->io.addr_data,
new_smi->io.slave_addr, new_smi->io.irq);
- switch (new_smi->io.si_type) {
+ switch (new_smi->io.si_info->type) {
case SI_KCS:
new_smi->handlers = &kcs_smi_handlers;
break;
@@ -2073,7 +2077,7 @@ static int try_smi_init(struct smi_info *new_smi)
smi_num++;
dev_info(new_smi->io.dev, "IPMI %s interface initialized\n",
- si_to_str[new_smi->io.si_type]);
+ si_to_str[new_smi->io.si_info->type]);
WARN_ON(new_smi->io.dev->init_name != NULL);
@@ -2091,9 +2095,18 @@ static int try_smi_init(struct smi_info *new_smi)
return rv;
}
+/*
+ * Devices in the same address space at the same address are the same.
+ */
+static bool __init ipmi_smi_info_same(struct smi_info *e1, struct smi_info *e2)
+{
+ return (e1->io.addr_space == e2->io.addr_space &&
+ e1->io.addr_data == e2->io.addr_data);
+}
+
static int __init init_ipmi_si(void)
{
- struct smi_info *e;
+ struct smi_info *e, *e2;
enum ipmi_addr_src type = SI_INVALID;
if (initialized)
@@ -2109,37 +2122,70 @@ static int __init init_ipmi_si(void)
ipmi_si_parisc_init();
- /* We prefer devices with interrupts, but in the case of a machine
- with multiple BMCs we assume that there will be several instances
- of a given type so if we succeed in registering a type then also
- try to register everything else of the same type */
mutex_lock(&smi_infos_lock);
+
+ /*
+ * Scan through all the devices. We prefer devices with
+ * interrupts, so go through those first in case there are any
+ * duplicates that don't have the interrupt set.
+ */
list_for_each_entry(e, &smi_infos, link) {
- /* Try to register a device if it has an IRQ and we either
- haven't successfully registered a device yet or this
- device has the same type as one we successfully registered */
- if (e->io.irq && (!type || e->io.addr_source == type)) {
- if (!try_smi_init(e)) {
- type = e->io.addr_source;
+ bool dup = false;
+
+ /* Register ones with interrupts first. */
+ if (!e->io.irq)
+ continue;
+
+ /*
+ * Go through the ones we have already seen to see if this
+ * is a dup.
+ */
+ list_for_each_entry(e2, &smi_infos, link) {
+ if (e2 == e)
+ break;
+ if (e2->io.irq && ipmi_smi_info_same(e, e2)) {
+ dup = true;
+ break;
}
}
+ if (!dup)
+ try_smi_init(e);
}
- /* type will only have been set if we successfully registered an si */
- if (type)
- goto skip_fallback_noirq;
+ /*
+ * Now try devices without interrupts.
+ */
+ list_for_each_entry(e, &smi_infos, link) {
+ bool dup = false;
- /* Fall back to the preferred device */
+ if (e->io.irq)
+ continue;
- list_for_each_entry(e, &smi_infos, link) {
- if (!e->io.irq && (!type || e->io.addr_source == type)) {
- if (!try_smi_init(e)) {
- type = e->io.addr_source;
+ /*
+ * Go through the ones we have already seen to see if
+ * this is a dup. We have already looked at the ones
+ * with interrupts.
+ */
+ list_for_each_entry(e2, &smi_infos, link) {
+ if (!e2->io.irq)
+ continue;
+ if (ipmi_smi_info_same(e, e2)) {
+ dup = true;
+ break;
+ }
+ }
+ list_for_each_entry(e2, &smi_infos, link) {
+ if (e2 == e)
+ break;
+ if (ipmi_smi_info_same(e, e2)) {
+ dup = true;
+ break;
}
}
+ if (!dup)
+ try_smi_init(e);
}
-skip_fallback_noirq:
initialized = true;
mutex_unlock(&smi_infos_lock);
@@ -2267,7 +2313,7 @@ struct device *ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
if (e->io.addr_space != addr_space)
continue;
- if (e->io.si_type != si_type)
+ if (e->io.si_info->type != si_type)
continue;
if (e->io.addr_data == addr) {
dev = get_device(e->io.dev);
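[Editor's illustrative sketch, not part of the patch: the init_ipmi_si() rework above reduces interface selection to two passes keyed only on (addr_space, addr_data) identity. The array-form sketch below is an assumption-labelled restatement of that logic; register_one() is a hypothetical stand-in for try_smi_init().]

/* Illustrative only: two-pass duplicate handling, as in init_ipmi_si(). */
static int register_one(struct si_sm_io *io);	/* stand-in for try_smi_init() */

static bool same_bmc(const struct si_sm_io *a, const struct si_sm_io *b)
{
	return a->addr_space == b->addr_space &&
	       a->addr_data == b->addr_data;
}

static void register_all(struct si_sm_io **io, int n)
{
	int i, j;

	/* Pass 1: entries with interrupts; the first one at an address wins. */
	for (i = 0; i < n; i++) {
		bool dup = false;

		if (!io[i]->irq)
			continue;
		for (j = 0; j < i; j++)
			if (io[j]->irq && same_bmc(io[i], io[j]))
				dup = true;
		if (!dup)
			register_one(io[i]);
	}

	/*
	 * Pass 2: no-IRQ entries; they lose to any IRQ entry, or to an
	 * earlier no-IRQ entry, at the same address.
	 */
	for (i = 0; i < n; i++) {
		bool dup = false;

		if (io[i]->irq)
			continue;
		for (j = 0; j < n; j++)
			if (j != i && (io[j]->irq || j < i) &&
			    same_bmc(io[i], io[j]))
				dup = true;
		if (!dup)
			register_one(io[i]);
	}
}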
diff --git a/drivers/char/ipmi/ipmi_si_parisc.c b/drivers/char/ipmi/ipmi_si_parisc.c
index 2be2967f6b5f..3b0a70d9adbb 100644
--- a/drivers/char/ipmi/ipmi_si_parisc.c
+++ b/drivers/char/ipmi/ipmi_si_parisc.c
@@ -13,7 +13,7 @@ static int __init ipmi_parisc_probe(struct parisc_device *dev)
memset(&io, 0, sizeof(io));
- io.si_type = SI_KCS;
+ io.si_info = &ipmi_kcs_si_info;
io.addr_source = SI_DEVICETREE;
io.addr_space = IPMI_MEM_ADDR_SPACE;
io.addr_data = dev->hpa.start;
diff --git a/drivers/char/ipmi/ipmi_si_pci.c b/drivers/char/ipmi/ipmi_si_pci.c
index 8c0ea637aba0..17f72763322d 100644
--- a/drivers/char/ipmi/ipmi_si_pci.c
+++ b/drivers/char/ipmi/ipmi_si_pci.c
@@ -23,30 +23,32 @@ MODULE_PARM_DESC(trypci,
static int ipmi_pci_probe_regspacing(struct si_sm_io *io)
{
- if (io->si_type == SI_KCS) {
- unsigned char status;
- int regspacing;
-
- io->regsize = DEFAULT_REGSIZE;
- io->regshift = 0;
-
- /* detect 1, 4, 16byte spacing */
- for (regspacing = DEFAULT_REGSPACING; regspacing <= 16;) {
- io->regspacing = regspacing;
- if (io->io_setup(io)) {
- dev_err(io->dev, "Could not setup I/O space\n");
- return DEFAULT_REGSPACING;
- }
- /* write invalid cmd */
- io->outputb(io, 1, 0x10);
- /* read status back */
- status = io->inputb(io, 1);
- io->io_cleanup(io);
- if (status)
- return regspacing;
- regspacing *= 4;
+ unsigned char status;
+ int regspacing;
+
+ if (io->si_info->type != SI_KCS)
+ return DEFAULT_REGSPACING;
+
+ io->regsize = DEFAULT_REGSIZE;
+ io->regshift = 0;
+
+ /* detect 1, 4, 16byte spacing */
+ for (regspacing = DEFAULT_REGSPACING; regspacing <= 16;) {
+ io->regspacing = regspacing;
+ if (io->io_setup(io)) {
+ dev_err(io->dev, "Could not setup I/O space\n");
+ return DEFAULT_REGSPACING;
}
+ /* write invalid cmd */
+ io->outputb(io, 1, 0x10);
+ /* read status back */
+ status = io->inputb(io, 1);
+ io->io_cleanup(io);
+ if (status)
+ return regspacing;
+ regspacing *= 4;
}
+
return DEFAULT_REGSPACING;
}
@@ -74,15 +76,15 @@ static int ipmi_pci_probe(struct pci_dev *pdev,
switch (pdev->class) {
case PCI_CLASS_SERIAL_IPMI_SMIC:
- io.si_type = SI_SMIC;
+ io.si_info = &ipmi_smic_si_info;
break;
case PCI_CLASS_SERIAL_IPMI_KCS:
- io.si_type = SI_KCS;
+ io.si_info = &ipmi_kcs_si_info;
break;
case PCI_CLASS_SERIAL_IPMI_BT:
- io.si_type = SI_BT;
+ io.si_info = &ipmi_bt_si_info;
break;
default:
diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c
index 550cabd43ae6..fb6e359ae494 100644
--- a/drivers/char/ipmi/ipmi_si_platform.c
+++ b/drivers/char/ipmi/ipmi_si_platform.c
@@ -163,9 +163,13 @@ static int platform_ipmi_probe(struct platform_device *pdev)
switch (type) {
case SI_KCS:
+ io.si_info = &ipmi_kcs_si_info;
+ break;
case SI_SMIC:
+ io.si_info = &ipmi_smic_si_info;
+ break;
case SI_BT:
- io.si_type = type;
+ io.si_info = &ipmi_bt_si_info;
break;
case SI_TYPE_INVALID: /* User disabled this in hardcode. */
return -ENODEV;
@@ -213,13 +217,10 @@ static int platform_ipmi_probe(struct platform_device *pdev)
#ifdef CONFIG_OF
static const struct of_device_id of_ipmi_match[] = {
- { .type = "ipmi", .compatible = "ipmi-kcs",
- .data = (void *)(unsigned long) SI_KCS },
- { .type = "ipmi", .compatible = "ipmi-smic",
- .data = (void *)(unsigned long) SI_SMIC },
- { .type = "ipmi", .compatible = "ipmi-bt",
- .data = (void *)(unsigned long) SI_BT },
- {},
+ { .type = "ipmi", .compatible = "ipmi-kcs", .data = &ipmi_kcs_si_info },
+ { .type = "ipmi", .compatible = "ipmi-smic", .data = &ipmi_smic_si_info },
+ { .type = "ipmi", .compatible = "ipmi-bt", .data = &ipmi_bt_si_info },
+ {}
};
MODULE_DEVICE_TABLE(of, of_ipmi_match);
@@ -265,7 +266,7 @@ static int of_ipmi_probe(struct platform_device *pdev)
}
memset(&io, 0, sizeof(io));
- io.si_type = (enum si_type)device_get_match_data(&pdev->dev);
+ io.si_info = device_get_match_data(&pdev->dev);
io.addr_source = SI_DEVICETREE;
io.irq_setup = ipmi_std_irq_setup;
@@ -296,7 +297,7 @@ static int find_slave_address(struct si_sm_io *io, int slave_addr)
{
#ifdef CONFIG_IPMI_DMI_DECODE
if (!slave_addr)
- slave_addr = ipmi_dmi_get_slave_addr(io->si_type,
+ slave_addr = ipmi_dmi_get_slave_addr(io->si_info->type,
io->addr_space,
io->addr_data);
#endif
@@ -335,13 +336,13 @@ static int acpi_ipmi_probe(struct platform_device *pdev)
switch (tmp) {
case 1:
- io.si_type = SI_KCS;
+ io.si_info = &ipmi_kcs_si_info;
break;
case 2:
- io.si_type = SI_SMIC;
+ io.si_info = &ipmi_smic_si_info;
break;
case 3:
- io.si_type = SI_BT;
+ io.si_info = &ipmi_bt_si_info;
break;
case 4: /* SSIF, just ignore */
return -ENODEV;
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 0b45b07dec22..5bf038e620c7 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -481,8 +481,6 @@ static int ipmi_ssif_thread(void *data)
/* Wait for something to do */
result = wait_for_completion_interruptible(
&ssif_info->wake_thread);
- if (ssif_info->stopping)
- break;
if (result == -ERESTARTSYS)
continue;
init_completion(&ssif_info->wake_thread);
@@ -1270,10 +1268,8 @@ static void shutdown_ssif(void *send_info)
ssif_info->stopping = true;
timer_delete_sync(&ssif_info->watch_timer);
timer_delete_sync(&ssif_info->retry_timer);
- if (ssif_info->thread) {
- complete(&ssif_info->wake_thread);
+ if (ssif_info->thread)
kthread_stop(ssif_info->thread);
- }
}
static void ssif_remove(struct i2c_client *client)
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index f1875b2bebbc..ab759b492fdd 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -150,7 +150,7 @@ static char preaction[16] = "pre_none";
static unsigned char preop_val = WDOG_PREOP_NONE;
static char preop[16] = "preop_none";
-static DEFINE_SPINLOCK(ipmi_read_lock);
+static DEFINE_MUTEX(ipmi_read_mutex);
static char data_to_read;
static DECLARE_WAIT_QUEUE_HEAD(read_q);
static struct fasync_struct *fasync_q;
@@ -363,7 +363,7 @@ static int __ipmi_set_timeout(struct ipmi_smi_msg *smi_msg,
{
struct kernel_ipmi_msg msg;
unsigned char data[6];
- int rv;
+ int rv = 0;
struct ipmi_system_interface_addr addr;
int hbnow = 0;
@@ -405,14 +405,18 @@ static int __ipmi_set_timeout(struct ipmi_smi_msg *smi_msg,
msg.cmd = IPMI_WDOG_SET_TIMER;
msg.data = data;
msg.data_len = sizeof(data);
- rv = ipmi_request_supply_msgs(watchdog_user,
- (struct ipmi_addr *) &addr,
- 0,
- &msg,
- NULL,
- smi_msg,
- recv_msg,
- 1);
+ if (smi_msg)
+ rv = ipmi_request_supply_msgs(watchdog_user,
+ (struct ipmi_addr *) &addr,
+ 0,
+ &msg,
+ NULL,
+ smi_msg,
+ recv_msg,
+ 1);
+ else
+ ipmi_panic_request_and_wait(watchdog_user,
+ (struct ipmi_addr *) &addr, &msg);
if (rv)
pr_warn("set timeout error: %d\n", rv);
else if (send_heartbeat_now)
@@ -431,9 +435,7 @@ static int _ipmi_set_timeout(int do_heartbeat)
atomic_set(&msg_tofree, 2);
- rv = __ipmi_set_timeout(&smi_msg,
- &recv_msg,
- &send_heartbeat_now);
+ rv = __ipmi_set_timeout(&smi_msg, &recv_msg, &send_heartbeat_now);
if (rv) {
atomic_set(&msg_tofree, 0);
return rv;
@@ -460,27 +462,10 @@ static int ipmi_set_timeout(int do_heartbeat)
return rv;
}
-static atomic_t panic_done_count = ATOMIC_INIT(0);
-
-static void panic_smi_free(struct ipmi_smi_msg *msg)
-{
- atomic_dec(&panic_done_count);
-}
-static void panic_recv_free(struct ipmi_recv_msg *msg)
-{
- atomic_dec(&panic_done_count);
-}
-
-static struct ipmi_smi_msg panic_halt_heartbeat_smi_msg =
- INIT_IPMI_SMI_MSG(panic_smi_free);
-static struct ipmi_recv_msg panic_halt_heartbeat_recv_msg =
- INIT_IPMI_RECV_MSG(panic_recv_free);
-
static void panic_halt_ipmi_heartbeat(void)
{
struct kernel_ipmi_msg msg;
struct ipmi_system_interface_addr addr;
- int rv;
/*
* Don't reset the timer if we have the timer turned off, that
@@ -497,24 +482,10 @@ static void panic_halt_ipmi_heartbeat(void)
msg.cmd = IPMI_WDOG_RESET_TIMER;
msg.data = NULL;
msg.data_len = 0;
- atomic_add(2, &panic_done_count);
- rv = ipmi_request_supply_msgs(watchdog_user,
- (struct ipmi_addr *) &addr,
- 0,
- &msg,
- NULL,
- &panic_halt_heartbeat_smi_msg,
- &panic_halt_heartbeat_recv_msg,
- 1);
- if (rv)
- atomic_sub(2, &panic_done_count);
+ ipmi_panic_request_and_wait(watchdog_user, (struct ipmi_addr *) &addr,
+ &msg);
}
-static struct ipmi_smi_msg panic_halt_smi_msg =
- INIT_IPMI_SMI_MSG(panic_smi_free);
-static struct ipmi_recv_msg panic_halt_recv_msg =
- INIT_IPMI_RECV_MSG(panic_recv_free);
-
/*
* Special call, doesn't claim any locks. This is only to be called
* at panic or halt time, in run-to-completion mode, when the caller
@@ -526,22 +497,13 @@ static void panic_halt_ipmi_set_timeout(void)
int send_heartbeat_now;
int rv;
- /* Wait for the messages to be free. */
- while (atomic_read(&panic_done_count) != 0)
- ipmi_poll_interface(watchdog_user);
- atomic_add(2, &panic_done_count);
- rv = __ipmi_set_timeout(&panic_halt_smi_msg,
- &panic_halt_recv_msg,
- &send_heartbeat_now);
+ rv = __ipmi_set_timeout(NULL, NULL, &send_heartbeat_now);
if (rv) {
- atomic_sub(2, &panic_done_count);
pr_warn("Unable to extend the watchdog timeout\n");
} else {
if (send_heartbeat_now)
panic_halt_ipmi_heartbeat();
}
- while (atomic_read(&panic_done_count) != 0)
- ipmi_poll_interface(watchdog_user);
}
static int __ipmi_heartbeat(void)
@@ -793,7 +755,7 @@ static ssize_t ipmi_read(struct file *file,
* Reading returns if the pretimeout has gone off, and it only does
* it once per pretimeout.
*/
- spin_lock_irq(&ipmi_read_lock);
+ mutex_lock(&ipmi_read_mutex);
if (!data_to_read) {
if (file->f_flags & O_NONBLOCK) {
rv = -EAGAIN;
@@ -804,9 +766,9 @@ static ssize_t ipmi_read(struct file *file,
add_wait_queue(&read_q, &wait);
while (!data_to_read && !signal_pending(current)) {
set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irq(&ipmi_read_lock);
+ mutex_unlock(&ipmi_read_mutex);
schedule();
- spin_lock_irq(&ipmi_read_lock);
+ mutex_lock(&ipmi_read_mutex);
}
remove_wait_queue(&read_q, &wait);
@@ -818,7 +780,7 @@ static ssize_t ipmi_read(struct file *file,
data_to_read = 0;
out:
- spin_unlock_irq(&ipmi_read_lock);
+ mutex_unlock(&ipmi_read_mutex);
if (rv == 0) {
if (copy_to_user(buf, &data_to_read, 1))
@@ -856,10 +818,10 @@ static __poll_t ipmi_poll(struct file *file, poll_table *wait)
poll_wait(file, &read_q, wait);
- spin_lock_irq(&ipmi_read_lock);
+ mutex_lock(&ipmi_read_mutex);
if (data_to_read)
mask |= (EPOLLIN | EPOLLRDNORM);
- spin_unlock_irq(&ipmi_read_lock);
+ mutex_unlock(&ipmi_read_mutex);
return mask;
}
@@ -932,13 +894,11 @@ static void ipmi_wdog_pretimeout_handler(void *handler_data)
if (atomic_inc_and_test(&preop_panic_excl))
panic("Watchdog pre-timeout");
} else if (preop_val == WDOG_PREOP_GIVE_DATA) {
- unsigned long flags;
-
- spin_lock_irqsave(&ipmi_read_lock, flags);
+ mutex_lock(&ipmi_read_mutex);
data_to_read = 1;
wake_up_interruptible(&read_q);
kill_fasync(&fasync_q, SIGIO, POLL_IN);
- spin_unlock_irqrestore(&ipmi_read_lock, flags);
+ mutex_unlock(&ipmi_read_mutex);
}
}
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 713573b6c86c..19c1ed280fd7 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -517,6 +517,7 @@ source "drivers/clk/samsung/Kconfig"
source "drivers/clk/sifive/Kconfig"
source "drivers/clk/socfpga/Kconfig"
source "drivers/clk/sophgo/Kconfig"
+source "drivers/clk/spacemit/Kconfig"
source "drivers/clk/sprd/Kconfig"
source "drivers/clk/starfive/Kconfig"
source "drivers/clk/sunxi/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index bf4bd45adc3a..42867cd37c33 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -145,6 +145,7 @@ obj-$(CONFIG_COMMON_CLK_SAMSUNG) += samsung/
obj-$(CONFIG_CLK_SIFIVE) += sifive/
obj-y += socfpga/
obj-y += sophgo/
+obj-y += spacemit/
obj-$(CONFIG_PLAT_SPEAR) += spear/
obj-y += sprd/
obj-$(CONFIG_ARCH_STI) += st/
diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c
index 2b0ea882f1e4..0171e6b2bfca 100644
--- a/drivers/clk/bcm/clk-kona.c
+++ b/drivers/clk/bcm/clk-kona.c
@@ -53,24 +53,6 @@ static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div)
return (u64)reg_div + ((u64)1 << div->u.s.frac_width);
}
-/*
- * Build a scaled divider value as close as possible to the
- * given whole part (div_value) and fractional part (expressed
- * in billionths).
- */
-u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths)
-{
- u64 combined;
-
- BUG_ON(!div_value);
- BUG_ON(billionths >= BILLION);
-
- combined = (u64)div_value * BILLION + billionths;
- combined <<= div->u.s.frac_width;
-
- return DIV_ROUND_CLOSEST_ULL(combined, BILLION);
-}
-
/* The scaled minimum divisor representable by a divider */
static inline u64
scaled_div_min(struct bcm_clk_div *div)
diff --git a/drivers/clk/bcm/clk-kona.h b/drivers/clk/bcm/clk-kona.h
index e09655024ac2..348a3454ce40 100644
--- a/drivers/clk/bcm/clk-kona.h
+++ b/drivers/clk/bcm/clk-kona.h
@@ -492,8 +492,6 @@ extern struct clk_ops kona_peri_clk_ops;
/* Externally visible functions */
extern u64 scaled_div_max(struct bcm_clk_div *div);
-extern u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value,
- u32 billionths);
extern void __init kona_dt_ccu_setup(struct ccu_data *ccu,
struct device_node *node);
diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c
index 0e1fe3759530..8e4fde03ed23 100644
--- a/drivers/clk/bcm/clk-raspberrypi.c
+++ b/drivers/clk/bcm/clk-raspberrypi.c
@@ -286,6 +286,8 @@ static struct clk_hw *raspberrypi_clk_register(struct raspberrypi_clk *rpi,
init.name = devm_kasprintf(rpi->dev, GFP_KERNEL,
"fw-clk-%s",
rpi_firmware_clk_names[id]);
+ if (!init.name)
+ return ERR_PTR(-ENOMEM);
init.ops = &raspberrypi_firmware_clk_ops;
init.flags = CLK_GET_RATE_NOCACHE;
@@ -480,4 +482,3 @@ module_platform_driver(raspberrypi_clk_driver);
MODULE_AUTHOR("Nicolas Saenz Julienne <nsaenzjulienne@suse.de>");
MODULE_DESCRIPTION("Raspberry Pi firmware clock driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:raspberrypi-clk");
diff --git a/drivers/clk/davinci/pll.c b/drivers/clk/davinci/pll.c
index 6807a2efa93b..bfb6bbdc036c 100644
--- a/drivers/clk/davinci/pll.c
+++ b/drivers/clk/davinci/pll.c
@@ -763,13 +763,14 @@ int of_davinci_pll_init(struct device *dev, struct device_node *node,
return PTR_ERR(clk);
}
- child = of_get_child_by_name(node, "pllout");
- if (of_device_is_available(child))
+ child = of_get_available_child_by_name(node, "pllout");
+ if (child) {
of_clk_add_provider(child, of_clk_src_simple_get, clk);
- of_node_put(child);
+ of_node_put(child);
+ }
- child = of_get_child_by_name(node, "sysclk");
- if (of_device_is_available(child)) {
+ child = of_get_available_child_by_name(node, "sysclk");
+ if (child) {
struct clk_onecell_data *clk_data;
struct clk **clks;
int n_clks = max_sysclk_id + 1;
@@ -803,11 +804,11 @@ int of_davinci_pll_init(struct device *dev, struct device_node *node,
clks[(*div_info)->id] = clk;
}
of_clk_add_provider(child, of_clk_src_onecell_get, clk_data);
+ of_node_put(child);
}
- of_node_put(child);
- child = of_get_child_by_name(node, "auxclk");
- if (of_device_is_available(child)) {
+ child = of_get_available_child_by_name(node, "auxclk");
+ if (child) {
char child_name[MAX_NAME_SIZE];
snprintf(child_name, MAX_NAME_SIZE, "%s_auxclk", info->name);
@@ -818,11 +819,12 @@ int of_davinci_pll_init(struct device *dev, struct device_node *node,
child_name, PTR_ERR(clk));
else
of_clk_add_provider(child, of_clk_src_simple_get, clk);
+
+ of_node_put(child);
}
- of_node_put(child);
- child = of_get_child_by_name(node, "obsclk");
- if (of_device_is_available(child)) {
+ child = of_get_available_child_by_name(node, "obsclk");
+ if (child) {
if (obsclk_info)
clk = davinci_pll_obsclk_register(dev, obsclk_info, base);
else
@@ -833,8 +835,8 @@ int of_davinci_pll_init(struct device *dev, struct device_node *node,
PTR_ERR(clk));
else
of_clk_add_provider(child, of_clk_src_simple_get, clk);
+ of_node_put(child);
}
- of_node_put(child);
return 0;
}
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig
index be2e3a5f8336..ff003dc5ab20 100644
--- a/drivers/clk/meson/Kconfig
+++ b/drivers/clk/meson/Kconfig
@@ -55,7 +55,7 @@ config COMMON_CLK_MESON_CPU_DYNDIV
config COMMON_CLK_MESON8B
bool "Meson8 SoC Clock controller support"
depends on ARM
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_CLKC_UTILS
select COMMON_CLK_MESON_MPLL
@@ -70,7 +70,7 @@ config COMMON_CLK_MESON8B
config COMMON_CLK_GXBB
tristate "GXBB and GXL SoC clock controllers support"
depends on ARM64
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_DUALDIV
select COMMON_CLK_MESON_VID_PLL_DIV
@@ -86,7 +86,7 @@ config COMMON_CLK_GXBB
config COMMON_CLK_AXG
tristate "AXG SoC clock controllers support"
depends on ARM64
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_DUALDIV
select COMMON_CLK_MESON_MPLL
@@ -136,7 +136,7 @@ config COMMON_CLK_A1_PERIPHERALS
config COMMON_CLK_C3_PLL
tristate "Amlogic C3 PLL clock controller"
depends on ARM64
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_PLL
select COMMON_CLK_MESON_CLKC_UTILS
@@ -149,7 +149,7 @@ config COMMON_CLK_C3_PLL
config COMMON_CLK_C3_PERIPHERALS
tristate "Amlogic C3 peripherals clock controller"
depends on ARM64
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_DUALDIV
select COMMON_CLK_MESON_CLKC_UTILS
@@ -163,7 +163,7 @@ config COMMON_CLK_C3_PERIPHERALS
config COMMON_CLK_G12A
tristate "G12 and SM1 SoC clock controllers support"
depends on ARM64
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_DUALDIV
select COMMON_CLK_MESON_MPLL
@@ -181,7 +181,7 @@ config COMMON_CLK_G12A
config COMMON_CLK_S4_PLL
tristate "S4 SoC PLL clock controllers support"
depends on ARM64
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_CLKC_UTILS
select COMMON_CLK_MESON_MPLL
select COMMON_CLK_MESON_PLL
@@ -194,7 +194,7 @@ config COMMON_CLK_S4_PLL
config COMMON_CLK_S4_PERIPHERALS
tristate "S4 SoC peripherals clock controllers support"
depends on ARM64
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_CLKC_UTILS
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_DUALDIV
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
index ceabebb1863d..d9e546e006d7 100644
--- a/drivers/clk/meson/g12a.c
+++ b/drivers/clk/meson/g12a.c
@@ -4093,6 +4093,7 @@ static const struct clk_parent_data spicc_sclk_parent_data[] = {
{ .hw = &g12a_clk81.hw },
{ .hw = &g12a_fclk_div4.hw },
{ .hw = &g12a_fclk_div3.hw },
+ { .hw = &g12a_fclk_div2.hw },
{ .hw = &g12a_fclk_div5.hw },
{ .hw = &g12a_fclk_div7.hw },
};
diff --git a/drivers/clk/qcom/apcs-sdx55.c b/drivers/clk/qcom/apcs-sdx55.c
index 76ece6c4a969..3ba01622d8f0 100644
--- a/drivers/clk/qcom/apcs-sdx55.c
+++ b/drivers/clk/qcom/apcs-sdx55.c
@@ -111,7 +111,11 @@ static int qcom_apcs_sdx55_clk_probe(struct platform_device *pdev)
* driver, there seems to be no better place to do this. So do it here!
*/
cpu_dev = get_cpu_device(0);
- dev_pm_domain_attach(cpu_dev, true);
+ ret = dev_pm_domain_attach(cpu_dev, true);
+ if (ret) {
+ dev_err_probe(dev, ret, "can't get PM domain: %d\n", ret);
+ goto err;
+ }
return 0;
diff --git a/drivers/clk/qcom/camcc-sa8775p.c b/drivers/clk/qcom/camcc-sa8775p.c
index 11bd2e234811..50e5a131261b 100644
--- a/drivers/clk/qcom/camcc-sa8775p.c
+++ b/drivers/clk/qcom/camcc-sa8775p.c
@@ -10,7 +10,7 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
-#include <dt-bindings/clock/qcom,sa8775p-camcc.h>
+#include <dt-bindings/clock/qcom,qcs8300-camcc.h>
#include "clk-alpha-pll.h"
#include "clk-branch.h"
@@ -1681,6 +1681,24 @@ static struct clk_branch cam_cc_sm_obs_clk = {
},
};
+static struct clk_branch cam_cc_titan_top_accu_shift_clk = {
+ .halt_reg = 0x131f0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x131f0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_titan_top_accu_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct gdsc cam_cc_titan_top_gdsc = {
.gdscr = 0x131bc,
.en_rest_wait_val = 0x2,
@@ -1775,6 +1793,7 @@ static struct clk_regmap *cam_cc_sa8775p_clocks[] = {
[CAM_CC_SLEEP_CLK_SRC] = &cam_cc_sleep_clk_src.clkr,
[CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr,
[CAM_CC_SM_OBS_CLK] = &cam_cc_sm_obs_clk.clkr,
+ [CAM_CC_TITAN_TOP_ACCU_SHIFT_CLK] = NULL,
[CAM_CC_XO_CLK_SRC] = &cam_cc_xo_clk_src.clkr,
[CAM_CC_QDSS_DEBUG_XO_CLK] = &cam_cc_qdss_debug_xo_clk.clkr,
};
@@ -1811,6 +1830,7 @@ static const struct qcom_cc_desc cam_cc_sa8775p_desc = {
};
static const struct of_device_id cam_cc_sa8775p_match_table[] = {
+ { .compatible = "qcom,qcs8300-camcc" },
{ .compatible = "qcom,sa8775p-camcc" },
{ }
};
@@ -1841,10 +1861,83 @@ static int cam_cc_sa8775p_probe(struct platform_device *pdev)
clk_lucid_evo_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
clk_lucid_evo_pll_configure(&cam_cc_pll5, regmap, &cam_cc_pll5_config);
- /* Keep some clocks always enabled */
- qcom_branch_set_clk_en(regmap, 0x13194); /* CAM_CC_CAMNOC_XO_CLK */
- qcom_branch_set_clk_en(regmap, 0x131ec); /* CAM_CC_GDSC_CLK */
- qcom_branch_set_clk_en(regmap, 0x13208); /* CAM_CC_SLEEP_CLK */
+ if (device_is_compatible(&pdev->dev, "qcom,qcs8300-camcc")) {
+ cam_cc_camnoc_axi_clk_src.cmd_rcgr = 0x13154;
+ cam_cc_camnoc_axi_clk.halt_reg = 0x1316c;
+ cam_cc_camnoc_axi_clk.clkr.enable_reg = 0x1316c;
+ cam_cc_camnoc_dcd_xo_clk.halt_reg = 0x13174;
+ cam_cc_camnoc_dcd_xo_clk.clkr.enable_reg = 0x13174;
+
+ cam_cc_csi0phytimer_clk_src.cmd_rcgr = 0x15054;
+ cam_cc_csi1phytimer_clk_src.cmd_rcgr = 0x15078;
+ cam_cc_csi2phytimer_clk_src.cmd_rcgr = 0x15098;
+ cam_cc_csid_clk_src.cmd_rcgr = 0x13134;
+
+ cam_cc_mclk0_clk_src.cmd_rcgr = 0x15000;
+ cam_cc_mclk1_clk_src.cmd_rcgr = 0x1501c;
+ cam_cc_mclk2_clk_src.cmd_rcgr = 0x15038;
+
+ cam_cc_fast_ahb_clk_src.cmd_rcgr = 0x13104;
+ cam_cc_slow_ahb_clk_src.cmd_rcgr = 0x1311c;
+ cam_cc_xo_clk_src.cmd_rcgr = 0x131b8;
+ cam_cc_sleep_clk_src.cmd_rcgr = 0x131d4;
+
+ cam_cc_core_ahb_clk.halt_reg = 0x131b4;
+ cam_cc_core_ahb_clk.clkr.enable_reg = 0x131b4;
+
+ cam_cc_cpas_ahb_clk.halt_reg = 0x130f4;
+ cam_cc_cpas_ahb_clk.clkr.enable_reg = 0x130f4;
+ cam_cc_cpas_fast_ahb_clk.halt_reg = 0x130fc;
+ cam_cc_cpas_fast_ahb_clk.clkr.enable_reg = 0x130fc;
+
+ cam_cc_csi0phytimer_clk.halt_reg = 0x1506c;
+ cam_cc_csi0phytimer_clk.clkr.enable_reg = 0x1506c;
+ cam_cc_csi1phytimer_clk.halt_reg = 0x15090;
+ cam_cc_csi1phytimer_clk.clkr.enable_reg = 0x15090;
+ cam_cc_csi2phytimer_clk.halt_reg = 0x150b0;
+ cam_cc_csi2phytimer_clk.clkr.enable_reg = 0x150b0;
+ cam_cc_csid_clk.halt_reg = 0x1314c;
+ cam_cc_csid_clk.clkr.enable_reg = 0x1314c;
+ cam_cc_csid_csiphy_rx_clk.halt_reg = 0x15074;
+ cam_cc_csid_csiphy_rx_clk.clkr.enable_reg = 0x15074;
+ cam_cc_csiphy0_clk.halt_reg = 0x15070;
+ cam_cc_csiphy0_clk.clkr.enable_reg = 0x15070;
+ cam_cc_csiphy1_clk.halt_reg = 0x15094;
+ cam_cc_csiphy1_clk.clkr.enable_reg = 0x15094;
+ cam_cc_csiphy2_clk.halt_reg = 0x150b4;
+ cam_cc_csiphy2_clk.clkr.enable_reg = 0x150b4;
+
+ cam_cc_mclk0_clk.halt_reg = 0x15018;
+ cam_cc_mclk0_clk.clkr.enable_reg = 0x15018;
+ cam_cc_mclk1_clk.halt_reg = 0x15034;
+ cam_cc_mclk1_clk.clkr.enable_reg = 0x15034;
+ cam_cc_mclk2_clk.halt_reg = 0x15050;
+ cam_cc_mclk2_clk.clkr.enable_reg = 0x15050;
+ cam_cc_qdss_debug_xo_clk.halt_reg = 0x1319c;
+ cam_cc_qdss_debug_xo_clk.clkr.enable_reg = 0x1319c;
+
+ cam_cc_titan_top_gdsc.gdscr = 0x131a0;
+
+ cam_cc_sa8775p_clocks[CAM_CC_CCI_3_CLK] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_CCI_3_CLK_SRC] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_CSI3PHYTIMER_CLK] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_CSI3PHYTIMER_CLK_SRC] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_CSIPHY3_CLK] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_MCLK3_CLK] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_MCLK3_CLK_SRC] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_TITAN_TOP_ACCU_SHIFT_CLK] =
+ &cam_cc_titan_top_accu_shift_clk.clkr;
+
+ /* Keep some clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0x13178); /* CAM_CC_CAMNOC_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x131d0); /* CAM_CC_GDSC_CLK */
+ qcom_branch_set_clk_en(regmap, 0x131ec); /* CAM_CC_SLEEP_CLK */
+ } else {
+ /* Keep some clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0x13194); /* CAM_CC_CAMNOC_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x131ec); /* CAM_CC_GDSC_CLK */
+ qcom_branch_set_clk_en(regmap, 0x13208); /* CAM_CC_SLEEP_CLK */
+ }
ret = qcom_cc_really_probe(&pdev->dev, &cam_cc_sa8775p_desc, regmap);
diff --git a/drivers/clk/qcom/camcc-sm6350.c b/drivers/clk/qcom/camcc-sm6350.c
index 1871970fb046..8aac97d29ce3 100644
--- a/drivers/clk/qcom/camcc-sm6350.c
+++ b/drivers/clk/qcom/camcc-sm6350.c
@@ -1695,6 +1695,9 @@ static struct clk_branch camcc_sys_tmr_clk = {
static struct gdsc bps_gdsc = {
.gdscr = 0x6004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "bps_gdsc",
},
@@ -1704,6 +1707,9 @@ static struct gdsc bps_gdsc = {
static struct gdsc ipe_0_gdsc = {
.gdscr = 0x7004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "ipe_0_gdsc",
},
@@ -1713,6 +1719,9 @@ static struct gdsc ipe_0_gdsc = {
static struct gdsc ife_0_gdsc = {
.gdscr = 0x9004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "ife_0_gdsc",
},
@@ -1721,6 +1730,9 @@ static struct gdsc ife_0_gdsc = {
static struct gdsc ife_1_gdsc = {
.gdscr = 0xa004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "ife_1_gdsc",
},
@@ -1729,6 +1741,9 @@ static struct gdsc ife_1_gdsc = {
static struct gdsc ife_2_gdsc = {
.gdscr = 0xb004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "ife_2_gdsc",
},
@@ -1737,6 +1752,9 @@ static struct gdsc ife_2_gdsc = {
static struct gdsc titan_top_gdsc = {
.gdscr = 0x14004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "titan_top_gdsc",
},
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index c7675930fde1..00fb3e53a388 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -66,6 +66,8 @@ struct clk_rpmh {
struct clk_rpmh_desc {
struct clk_hw **clks;
size_t num_clks;
+	/* RPMh clkaN clocks are optional for this platform */
+ bool clka_optional;
};
static DEFINE_MUTEX(rpmh_clk_lock);
@@ -648,6 +650,7 @@ static struct clk_hw *sm8550_rpmh_clocks[] = {
static const struct clk_rpmh_desc clk_rpmh_sm8550 = {
.clks = sm8550_rpmh_clocks,
.num_clks = ARRAY_SIZE(sm8550_rpmh_clocks),
+ .clka_optional = true,
};
static struct clk_hw *sm8650_rpmh_clocks[] = {
@@ -679,6 +682,7 @@ static struct clk_hw *sm8650_rpmh_clocks[] = {
static const struct clk_rpmh_desc clk_rpmh_sm8650 = {
.clks = sm8650_rpmh_clocks,
.num_clks = ARRAY_SIZE(sm8650_rpmh_clocks),
+ .clka_optional = true,
};
static struct clk_hw *sc7280_rpmh_clocks[] = {
@@ -847,6 +851,7 @@ static struct clk_hw *sm8750_rpmh_clocks[] = {
static const struct clk_rpmh_desc clk_rpmh_sm8750 = {
.clks = sm8750_rpmh_clocks,
.num_clks = ARRAY_SIZE(sm8750_rpmh_clocks),
+ .clka_optional = true,
};
static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec,
@@ -890,6 +895,12 @@ static int clk_rpmh_probe(struct platform_device *pdev)
rpmh_clk = to_clk_rpmh(hw_clks[i]);
res_addr = cmd_db_read_addr(rpmh_clk->res_name);
if (!res_addr) {
+ hw_clks[i] = NULL;
+
+ if (desc->clka_optional &&
+ !strncmp(rpmh_clk->res_name, "clka", sizeof("clka") - 1))
+ continue;
+
dev_err(&pdev->dev, "missing RPMh resource address for %s\n",
rpmh_clk->res_name);
return -ENODEV;
diff --git a/drivers/clk/qcom/dispcc-sm6350.c b/drivers/clk/qcom/dispcc-sm6350.c
index e703ecf00e44..b0bd163a449c 100644
--- a/drivers/clk/qcom/dispcc-sm6350.c
+++ b/drivers/clk/qcom/dispcc-sm6350.c
@@ -681,6 +681,9 @@ static struct clk_branch disp_cc_xo_clk = {
static struct gdsc mdss_gdsc = {
.gdscr = 0x1004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "mdss_gdsc",
},
diff --git a/drivers/clk/qcom/gcc-msm8939.c b/drivers/clk/qcom/gcc-msm8939.c
index 7431c9a65044..45193b3d714b 100644
--- a/drivers/clk/qcom/gcc-msm8939.c
+++ b/drivers/clk/qcom/gcc-msm8939.c
@@ -432,7 +432,7 @@ static const struct parent_map gcc_xo_gpll0_gpll1a_gpll6_sleep_map[] = {
{ P_XO, 0 },
{ P_GPLL0, 1 },
{ P_GPLL1_AUX, 2 },
- { P_GPLL6, 2 },
+ { P_GPLL6, 3 },
{ P_SLEEP_CLK, 6 },
};
@@ -1113,7 +1113,7 @@ static struct clk_rcg2 jpeg0_clk_src = {
};
static const struct freq_tbl ftbl_gcc_camss_mclk0_1_clk[] = {
- F(24000000, P_GPLL0, 1, 1, 45),
+ F(24000000, P_GPLL6, 1, 1, 45),
F(66670000, P_GPLL0, 12, 0, 0),
{ }
};
diff --git a/drivers/clk/qcom/gcc-sm6350.c b/drivers/clk/qcom/gcc-sm6350.c
index 74346dc02606..a4d6dff9d0f7 100644
--- a/drivers/clk/qcom/gcc-sm6350.c
+++ b/drivers/clk/qcom/gcc-sm6350.c
@@ -2320,6 +2320,9 @@ static struct clk_branch gcc_video_xo_clk = {
static struct gdsc usb30_prim_gdsc = {
.gdscr = 0x1a004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "usb30_prim_gdsc",
},
@@ -2328,6 +2331,9 @@ static struct gdsc usb30_prim_gdsc = {
static struct gdsc ufs_phy_gdsc = {
.gdscr = 0x3a004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "ufs_phy_gdsc",
},
diff --git a/drivers/clk/qcom/gcc-sm8650.c b/drivers/clk/qcom/gcc-sm8650.c
index fa1672c4e7d8..24f98062b9dd 100644
--- a/drivers/clk/qcom/gcc-sm8650.c
+++ b/drivers/clk/qcom/gcc-sm8650.c
@@ -3817,7 +3817,9 @@ static int gcc_sm8650_probe(struct platform_device *pdev)
qcom_branch_set_clk_en(regmap, 0x32004); /* GCC_VIDEO_AHB_CLK */
qcom_branch_set_clk_en(regmap, 0x32030); /* GCC_VIDEO_XO_CLK */
+ /* FORCE_MEM_CORE_ON for ufs phy ice core and gcc ufs phy axi clocks */
qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true);
+ qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_axi_clk, true);
/* Clear GDSC_SLEEP_ENA_VOTE to stop votes being auto-removed in sleep. */
regmap_write(regmap, 0x52150, 0x0);
diff --git a/drivers/clk/qcom/gcc-sm8750.c b/drivers/clk/qcom/gcc-sm8750.c
index b36d70976095..8092dd6b37b5 100644
--- a/drivers/clk/qcom/gcc-sm8750.c
+++ b/drivers/clk/qcom/gcc-sm8750.c
@@ -3244,8 +3244,9 @@ static int gcc_sm8750_probe(struct platform_device *pdev)
regmap_update_bits(regmap, 0x52010, BIT(20), BIT(20));
regmap_update_bits(regmap, 0x52010, BIT(21), BIT(21));
- /* FORCE_MEM_CORE_ON for ufs phy ice core clocks */
+ /* FORCE_MEM_CORE_ON for ufs phy ice core and gcc ufs phy axi clocks */
qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true);
+ qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_axi_clk, true);
return qcom_cc_really_probe(&pdev->dev, &gcc_sm8750_desc, regmap);
}
diff --git a/drivers/clk/qcom/gcc-x1e80100.c b/drivers/clk/qcom/gcc-x1e80100.c
index 009f39139b64..3e44757e25d3 100644
--- a/drivers/clk/qcom/gcc-x1e80100.c
+++ b/drivers/clk/qcom/gcc-x1e80100.c
@@ -6753,6 +6753,10 @@ static int gcc_x1e80100_probe(struct platform_device *pdev)
/* Clear GDSC_SLEEP_ENA_VOTE to stop votes being auto-removed in sleep. */
regmap_write(regmap, 0x52224, 0x0);
+ /* FORCE_MEM_CORE_ON for ufs phy ice core and gcc ufs phy axi clocks */
+ qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true);
+ qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_axi_clk, true);
+
return qcom_cc_really_probe(&pdev->dev, &gcc_x1e80100_desc, regmap);
}
diff --git a/drivers/clk/qcom/gpucc-sm6350.c b/drivers/clk/qcom/gpucc-sm6350.c
index 35ed0500bc59..ee89c42413f8 100644
--- a/drivers/clk/qcom/gpucc-sm6350.c
+++ b/drivers/clk/qcom/gpucc-sm6350.c
@@ -413,6 +413,9 @@ static struct clk_branch gpu_cc_gx_vsense_clk = {
static struct gdsc gpu_cx_gdsc = {
.gdscr = 0x106c,
.gds_hw_ctrl = 0x1540,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x8,
.pd = {
.name = "gpu_cx_gdsc",
},
@@ -423,6 +426,9 @@ static struct gdsc gpu_cx_gdsc = {
static struct gdsc gpu_gx_gdsc = {
.gdscr = 0x100c,
.clamp_io_ctrl = 0x1508,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
.pd = {
.name = "gpu_gx_gdsc",
.power_on = gdsc_gx_do_nothing_enable,
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index 5a4bc3f94d49..50c20119d12a 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -41,6 +41,7 @@ config CLK_RENESAS
select CLK_R9A08G045 if ARCH_R9A08G045
select CLK_R9A09G011 if ARCH_R9A09G011
select CLK_R9A09G047 if ARCH_R9A09G047
+ select CLK_R9A09G056 if ARCH_R9A09G056
select CLK_R9A09G057 if ARCH_R9A09G057
select CLK_SH73A0 if ARCH_SH73A0
@@ -199,6 +200,10 @@ config CLK_R9A09G047
bool "RZ/G3E clock support" if COMPILE_TEST
select CLK_RZV2H
+config CLK_R9A09G056
+ bool "RZ/V2N clock support" if COMPILE_TEST
+ select CLK_RZV2H
+
config CLK_R9A09G057
bool "RZ/V2H(P) clock support" if COMPILE_TEST
select CLK_RZV2H
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index 2d6e746939c4..f9075bca6e95 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_CLK_R9A07G054) += r9a07g044-cpg.o
obj-$(CONFIG_CLK_R9A08G045) += r9a08g045-cpg.o
obj-$(CONFIG_CLK_R9A09G011) += r9a09g011-cpg.o
obj-$(CONFIG_CLK_R9A09G047) += r9a09g047-cpg.o
+obj-$(CONFIG_CLK_R9A09G056) += r9a09g056-cpg.o
obj-$(CONFIG_CLK_R9A09G057) += r9a09g057-cpg.o
obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o
diff --git a/drivers/clk/renesas/r9a09g047-cpg.c b/drivers/clk/renesas/r9a09g047-cpg.c
index e9cf4342d0cf..21699999cedd 100644
--- a/drivers/clk/renesas/r9a09g047-cpg.c
+++ b/drivers/clk/renesas/r9a09g047-cpg.c
@@ -16,7 +16,7 @@
enum clk_ids {
/* Core Clock Outputs exported to DT */
- LAST_DT_CORE_CLK = R9A09G047_IOTOP_0_SHCLK,
+ LAST_DT_CORE_CLK = R9A09G047_GBETH_1_CLK_PTP_REF_I,
/* External Input Clocks */
CLK_AUDIO_EXTAL,
@@ -31,7 +31,14 @@ enum clk_ids {
CLK_PLLVDO,
/* Internal Core Clocks */
+ CLK_PLLCM33_DIV3,
+ CLK_PLLCM33_DIV4,
+ CLK_PLLCM33_DIV5,
CLK_PLLCM33_DIV16,
+ CLK_PLLCM33_GEAR,
+ CLK_SMUX2_XSPI_CLK0,
+ CLK_SMUX2_XSPI_CLK1,
+ CLK_PLLCM33_XSPI,
CLK_PLLCLN_DIV2,
CLK_PLLCLN_DIV8,
CLK_PLLCLN_DIV16,
@@ -41,6 +48,7 @@ enum clk_ids {
CLK_PLLDTY_ACPU_DIV4,
CLK_PLLDTY_DIV16,
CLK_PLLVDO_CRU0,
+ CLK_PLLVDO_GPU,
/* Module Clocks */
MOD_CLK_BASE,
@@ -60,6 +68,14 @@ static const struct clk_div_table dtable_2_4[] = {
{0, 0},
};
+static const struct clk_div_table dtable_2_16[] = {
+ {0, 2},
+ {1, 4},
+ {2, 8},
+ {3, 16},
+ {0, 0},
+};
+
static const struct clk_div_table dtable_2_64[] = {
{0, 2},
{1, 4},
@@ -69,6 +85,10 @@ static const struct clk_div_table dtable_2_64[] = {
{0, 0},
};
+/* Mux clock tables */
+static const char * const smux2_xspi_clk0[] = { ".pllcm33_div3", ".pllcm33_div4" };
+static const char * const smux2_xspi_clk1[] = { ".smux2_xspi_clk0", ".pllcm33_div5" };
+
static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = {
/* External Clock Inputs */
DEF_INPUT("audio_extal", CLK_AUDIO_EXTAL),
@@ -79,12 +99,21 @@ static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = {
DEF_FIXED(".pllcm33", CLK_PLLCM33, CLK_QEXTAL, 200, 3),
DEF_FIXED(".pllcln", CLK_PLLCLN, CLK_QEXTAL, 200, 3),
DEF_FIXED(".plldty", CLK_PLLDTY, CLK_QEXTAL, 200, 3),
- DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLL_CONF(0x64)),
+ DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLLCA55),
DEF_FIXED(".pllvdo", CLK_PLLVDO, CLK_QEXTAL, 105, 2),
/* Internal Core Clocks */
+ DEF_FIXED(".pllcm33_div3", CLK_PLLCM33_DIV3, CLK_PLLCM33, 1, 3),
+ DEF_FIXED(".pllcm33_div4", CLK_PLLCM33_DIV4, CLK_PLLCM33, 1, 4),
+ DEF_FIXED(".pllcm33_div5", CLK_PLLCM33_DIV5, CLK_PLLCM33, 1, 5),
DEF_FIXED(".pllcm33_div16", CLK_PLLCM33_DIV16, CLK_PLLCM33, 1, 16),
+ DEF_DDIV(".pllcm33_gear", CLK_PLLCM33_GEAR, CLK_PLLCM33_DIV4, CDDIV0_DIVCTL1, dtable_2_64),
+
+ DEF_SMUX(".smux2_xspi_clk0", CLK_SMUX2_XSPI_CLK0, SSEL1_SELCTL2, smux2_xspi_clk0),
+ DEF_SMUX(".smux2_xspi_clk1", CLK_SMUX2_XSPI_CLK1, SSEL1_SELCTL3, smux2_xspi_clk1),
+ DEF_CSDIV(".pllcm33_xspi", CLK_PLLCM33_XSPI, CLK_SMUX2_XSPI_CLK1, CSDIV0_DIVCTL3,
+ dtable_2_16),
DEF_FIXED(".pllcln_div2", CLK_PLLCLN_DIV2, CLK_PLLCLN, 1, 2),
DEF_FIXED(".pllcln_div8", CLK_PLLCLN_DIV8, CLK_PLLCLN, 1, 8),
DEF_FIXED(".pllcln_div16", CLK_PLLCLN_DIV16, CLK_PLLCLN, 1, 16),
@@ -96,6 +125,7 @@ static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = {
DEF_FIXED(".plldty_div16", CLK_PLLDTY_DIV16, CLK_PLLDTY, 1, 16),
DEF_DDIV(".pllvdo_cru0", CLK_PLLVDO_CRU0, CLK_PLLVDO, CDDIV3_DIVCTL3, dtable_2_4),
+ DEF_DDIV(".pllvdo_gpu", CLK_PLLVDO_GPU, CLK_PLLVDO, CDDIV3_DIVCTL1, dtable_2_64),
/* Core Clocks */
DEF_FIXED("sys_0_pclk", R9A09G047_SYS_0_PCLK, CLK_QEXTAL, 1, 1),
@@ -108,6 +138,7 @@ static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = {
DEF_DDIV("ca55_0_coreclk3", R9A09G047_CA55_0_CORECLK3, CLK_PLLCA55,
CDDIV1_DIVCTL3, dtable_1_8),
DEF_FIXED("iotop_0_shclk", R9A09G047_IOTOP_0_SHCLK, CLK_PLLCM33_DIV16, 1, 1),
+ DEF_FIXED("spi_clk_spi", R9A09G047_SPI_CLK_SPI, CLK_PLLCM33_XSPI, 1, 2),
};
static const struct rzv2h_mod_clk r9a09g047_mod_clks[] __initconst = {
@@ -153,6 +184,12 @@ static const struct rzv2h_mod_clk r9a09g047_mod_clks[] __initconst = {
BUS_MSTOP(10, BIT(14))),
DEF_MOD("canfd_0_clkc", CLK_PLLCLN_DIV20, 9, 14, 4, 30,
BUS_MSTOP(10, BIT(14))),
+ DEF_MOD("spi_hclk", CLK_PLLCM33_GEAR, 9, 15, 4, 31,
+ BUS_MSTOP(4, BIT(5))),
+ DEF_MOD("spi_aclk", CLK_PLLCM33_GEAR, 10, 0, 5, 0,
+ BUS_MSTOP(4, BIT(5))),
+ DEF_MOD_NO_PM("spi_clk_spix2", CLK_PLLCM33_XSPI, 10, 1, 5, 2,
+ BUS_MSTOP(4, BIT(5))),
DEF_MOD("sdhi_0_imclk", CLK_PLLCLN_DIV8, 10, 3, 5, 3,
BUS_MSTOP(8, BIT(2))),
DEF_MOD("sdhi_0_imclk2", CLK_PLLCLN_DIV8, 10, 4, 5, 4,
@@ -183,6 +220,12 @@ static const struct rzv2h_mod_clk r9a09g047_mod_clks[] __initconst = {
BUS_MSTOP(9, BIT(4))),
DEF_MOD("cru_0_pclk", CLK_PLLDTY_DIV16, 13, 4, 6, 20,
BUS_MSTOP(9, BIT(4))),
+ DEF_MOD("ge3d_clk", CLK_PLLVDO_GPU, 15, 0, 7, 16,
+ BUS_MSTOP(3, BIT(4))),
+ DEF_MOD("ge3d_axi_clk", CLK_PLLDTY_ACPU_DIV2, 15, 1, 7, 17,
+ BUS_MSTOP(3, BIT(4))),
+ DEF_MOD("ge3d_ace_clk", CLK_PLLDTY_ACPU_DIV2, 15, 2, 7, 18,
+ BUS_MSTOP(3, BIT(4))),
DEF_MOD("tsu_1_pclk", CLK_QEXTAL, 16, 10, 8, 10,
BUS_MSTOP(2, BIT(15))),
};
@@ -207,12 +250,17 @@ static const struct rzv2h_reset r9a09g047_resets[] __initconst = {
DEF_RST(10, 0, 4, 17), /* RIIC_8_MRST */
DEF_RST(10, 1, 4, 18), /* CANFD_0_RSTP_N */
DEF_RST(10, 2, 4, 19), /* CANFD_0_RSTC_N */
+ DEF_RST(10, 3, 4, 20), /* SPI_HRESETN */
+ DEF_RST(10, 4, 4, 21), /* SPI_ARESETN */
DEF_RST(10, 7, 4, 24), /* SDHI_0_IXRST */
DEF_RST(10, 8, 4, 25), /* SDHI_1_IXRST */
DEF_RST(10, 9, 4, 26), /* SDHI_2_IXRST */
DEF_RST(12, 5, 5, 22), /* CRU_0_PRESETN */
DEF_RST(12, 6, 5, 23), /* CRU_0_ARESETN */
DEF_RST(12, 7, 5, 24), /* CRU_0_S_RESETN */
+ DEF_RST(13, 13, 6, 14), /* GE3D_RESETN */
+ DEF_RST(13, 14, 6, 15), /* GE3D_AXI_RESETN */
+ DEF_RST(13, 15, 6, 16), /* GE3D_ACE_RESETN */
DEF_RST(15, 8, 7, 9), /* TSU_1_PRESETN */
};
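For a sense of scale on the new XSPI path: assuming the typical 24 MHz QEXTAL (the input rate is not stated in this hunk), .pllcm33 runs at 24 MHz * 200 / 3 = 1600 MHz, so the two-level static mux can pick 533.3 MHz (.pllcm33_div3), 400 MHz (.pllcm33_div4) or 320 MHz (.pllcm33_div5); selecting .pllcm33_div4 and a divide-by-2 from dtable_2_16 in CSDIV0_DIVCTL3 yields a 200 MHz .pllcm33_xspi, i.e. a 200 MHz spi_clk_spix2 and a 100 MHz spi_clk_spi.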
diff --git a/drivers/clk/renesas/r9a09g056-cpg.c b/drivers/clk/renesas/r9a09g056-cpg.c
new file mode 100644
index 000000000000..e2712a25c43a
--- /dev/null
+++ b/drivers/clk/renesas/r9a09g056-cpg.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/V2N CPG driver
+ *
+ * Copyright (C) 2025 Renesas Electronics Corp.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/clock/renesas,r9a09g056-cpg.h>
+
+#include "rzv2h-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R9A09G056_GBETH_1_CLK_PTP_REF_I,
+
+ /* External Input Clocks */
+ CLK_AUDIO_EXTAL,
+ CLK_RTXIN,
+ CLK_QEXTAL,
+
+ /* PLL Clocks */
+ CLK_PLLCM33,
+ CLK_PLLCLN,
+ CLK_PLLDTY,
+ CLK_PLLCA55,
+
+ /* Internal Core Clocks */
+ CLK_PLLCM33_DIV16,
+ CLK_PLLCLN_DIV2,
+ CLK_PLLCLN_DIV8,
+ CLK_PLLDTY_ACPU,
+ CLK_PLLDTY_ACPU_DIV4,
+
+ /* Module Clocks */
+ MOD_CLK_BASE,
+};
+
+static const struct clk_div_table dtable_1_8[] = {
+ {0, 1},
+ {1, 2},
+ {2, 4},
+ {3, 8},
+ {0, 0},
+};
+
+static const struct clk_div_table dtable_2_64[] = {
+ {0, 2},
+ {1, 4},
+ {2, 8},
+ {3, 16},
+ {4, 64},
+ {0, 0},
+};
+
+static const struct cpg_core_clk r9a09g056_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("audio_extal", CLK_AUDIO_EXTAL),
+ DEF_INPUT("rtxin", CLK_RTXIN),
+ DEF_INPUT("qextal", CLK_QEXTAL),
+
+ /* PLL Clocks */
+ DEF_FIXED(".pllcm33", CLK_PLLCM33, CLK_QEXTAL, 200, 3),
+ DEF_FIXED(".pllcln", CLK_PLLCLN, CLK_QEXTAL, 200, 3),
+ DEF_FIXED(".plldty", CLK_PLLDTY, CLK_QEXTAL, 200, 3),
+ DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLLCA55),
+
+ /* Internal Core Clocks */
+ DEF_FIXED(".pllcm33_div16", CLK_PLLCM33_DIV16, CLK_PLLCM33, 1, 16),
+
+ DEF_FIXED(".pllcln_div2", CLK_PLLCLN_DIV2, CLK_PLLCLN, 1, 2),
+ DEF_FIXED(".pllcln_div8", CLK_PLLCLN_DIV8, CLK_PLLCLN, 1, 8),
+
+ DEF_DDIV(".plldty_acpu", CLK_PLLDTY_ACPU, CLK_PLLDTY, CDDIV0_DIVCTL2, dtable_2_64),
+ DEF_FIXED(".plldty_acpu_div4", CLK_PLLDTY_ACPU_DIV4, CLK_PLLDTY_ACPU, 1, 4),
+
+ /* Core Clocks */
+ DEF_FIXED("sys_0_pclk", R9A09G056_SYS_0_PCLK, CLK_QEXTAL, 1, 1),
+ DEF_DDIV("ca55_0_coreclk0", R9A09G056_CA55_0_CORE_CLK0, CLK_PLLCA55,
+ CDDIV1_DIVCTL0, dtable_1_8),
+ DEF_DDIV("ca55_0_coreclk1", R9A09G056_CA55_0_CORE_CLK1, CLK_PLLCA55,
+ CDDIV1_DIVCTL1, dtable_1_8),
+ DEF_DDIV("ca55_0_coreclk2", R9A09G056_CA55_0_CORE_CLK2, CLK_PLLCA55,
+ CDDIV1_DIVCTL2, dtable_1_8),
+ DEF_DDIV("ca55_0_coreclk3", R9A09G056_CA55_0_CORE_CLK3, CLK_PLLCA55,
+ CDDIV1_DIVCTL3, dtable_1_8),
+ DEF_FIXED("iotop_0_shclk", R9A09G056_IOTOP_0_SHCLK, CLK_PLLCM33_DIV16, 1, 1),
+};
+
+static const struct rzv2h_mod_clk r9a09g056_mod_clks[] __initconst = {
+ DEF_MOD_CRITICAL("gic_0_gicclk", CLK_PLLDTY_ACPU_DIV4, 1, 3, 0, 19,
+ BUS_MSTOP(3, BIT(5))),
+ DEF_MOD("scif_0_clk_pck", CLK_PLLCM33_DIV16, 8, 15, 4, 15,
+ BUS_MSTOP(3, BIT(14))),
+ DEF_MOD("sdhi_0_imclk", CLK_PLLCLN_DIV8, 10, 3, 5, 3,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_0_imclk2", CLK_PLLCLN_DIV8, 10, 4, 5, 4,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_0_clk_hs", CLK_PLLCLN_DIV2, 10, 5, 5, 5,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_0_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 6, 5, 6,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_1_imclk", CLK_PLLCLN_DIV8, 10, 7, 5, 7,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_1_imclk2", CLK_PLLCLN_DIV8, 10, 8, 5, 8,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_1_clk_hs", CLK_PLLCLN_DIV2, 10, 9, 5, 9,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_1_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 10, 5, 10,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_2_imclk", CLK_PLLCLN_DIV8, 10, 11, 5, 11,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("sdhi_2_imclk2", CLK_PLLCLN_DIV8, 10, 12, 5, 12,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("sdhi_2_clk_hs", CLK_PLLCLN_DIV2, 10, 13, 5, 13,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("sdhi_2_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 14, 5, 14,
+ BUS_MSTOP(8, BIT(4))),
+};
+
+static const struct rzv2h_reset r9a09g056_resets[] __initconst = {
+ DEF_RST(3, 0, 1, 1), /* SYS_0_PRESETN */
+ DEF_RST(3, 8, 1, 9), /* GIC_0_GICRESET_N */
+ DEF_RST(3, 9, 1, 10), /* GIC_0_DBG_GICRESET_N */
+ DEF_RST(9, 5, 4, 6), /* SCIF_0_RST_SYSTEM_N */
+ DEF_RST(10, 7, 4, 24), /* SDHI_0_IXRST */
+ DEF_RST(10, 8, 4, 25), /* SDHI_1_IXRST */
+ DEF_RST(10, 9, 4, 26), /* SDHI_2_IXRST */
+};
+
+const struct rzv2h_cpg_info r9a09g056_cpg_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r9a09g056_core_clks,
+ .num_core_clks = ARRAY_SIZE(r9a09g056_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r9a09g056_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r9a09g056_mod_clks),
+ .num_hw_mod_clks = 25 * 16,
+
+ /* Resets */
+ .resets = r9a09g056_resets,
+ .num_resets = ARRAY_SIZE(r9a09g056_resets),
+
+ .num_mstop_bits = 192,
+};
diff --git a/drivers/clk/renesas/r9a09g057-cpg.c b/drivers/clk/renesas/r9a09g057-cpg.c
index d63eafbca780..3c40e36259fe 100644
--- a/drivers/clk/renesas/r9a09g057-cpg.c
+++ b/drivers/clk/renesas/r9a09g057-cpg.c
@@ -16,7 +16,7 @@
enum clk_ids {
/* Core Clock Outputs exported to DT */
- LAST_DT_CORE_CLK = R9A09G057_IOTOP_0_SHCLK,
+ LAST_DT_CORE_CLK = R9A09G057_GBETH_1_CLK_PTP_REF_I,
/* External Input Clocks */
CLK_AUDIO_EXTAL,
@@ -29,6 +29,7 @@ enum clk_ids {
CLK_PLLDTY,
CLK_PLLCA55,
CLK_PLLVDO,
+ CLK_PLLGPU,
/* Internal Core Clocks */
CLK_PLLCM33_DIV4,
@@ -40,6 +41,7 @@ enum clk_ids {
CLK_PLLDTY_ACPU,
CLK_PLLDTY_ACPU_DIV2,
CLK_PLLDTY_ACPU_DIV4,
+ CLK_PLLDTY_DIV8,
CLK_PLLDTY_DIV16,
CLK_PLLDTY_RCPU,
CLK_PLLDTY_RCPU_DIV4,
@@ -47,6 +49,7 @@ enum clk_ids {
CLK_PLLVDO_CRU1,
CLK_PLLVDO_CRU2,
CLK_PLLVDO_CRU3,
+ CLK_PLLGPU_GEAR,
/* Module Clocks */
MOD_CLK_BASE,
@@ -85,8 +88,9 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
DEF_FIXED(".pllcm33", CLK_PLLCM33, CLK_QEXTAL, 200, 3),
DEF_FIXED(".pllcln", CLK_PLLCLN, CLK_QEXTAL, 200, 3),
DEF_FIXED(".plldty", CLK_PLLDTY, CLK_QEXTAL, 200, 3),
- DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLL_CONF(0x64)),
+ DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLLCA55),
DEF_FIXED(".pllvdo", CLK_PLLVDO, CLK_QEXTAL, 105, 2),
+ DEF_PLL(".pllgpu", CLK_PLLGPU, CLK_QEXTAL, PLLGPU),
/* Internal Core Clocks */
DEF_FIXED(".pllcm33_div4", CLK_PLLCM33_DIV4, CLK_PLLCM33, 1, 4),
@@ -101,6 +105,7 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
DEF_DDIV(".plldty_acpu", CLK_PLLDTY_ACPU, CLK_PLLDTY, CDDIV0_DIVCTL2, dtable_2_64),
DEF_FIXED(".plldty_acpu_div2", CLK_PLLDTY_ACPU_DIV2, CLK_PLLDTY_ACPU, 1, 2),
DEF_FIXED(".plldty_acpu_div4", CLK_PLLDTY_ACPU_DIV4, CLK_PLLDTY_ACPU, 1, 4),
+ DEF_FIXED(".plldty_div8", CLK_PLLDTY_DIV8, CLK_PLLDTY, 1, 8),
DEF_FIXED(".plldty_div16", CLK_PLLDTY_DIV16, CLK_PLLDTY, 1, 16),
DEF_DDIV(".plldty_rcpu", CLK_PLLDTY_RCPU, CLK_PLLDTY, CDDIV3_DIVCTL2, dtable_2_64),
DEF_FIXED(".plldty_rcpu_div4", CLK_PLLDTY_RCPU_DIV4, CLK_PLLDTY_RCPU, 1, 4),
@@ -110,6 +115,8 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
DEF_DDIV(".pllvdo_cru2", CLK_PLLVDO_CRU2, CLK_PLLVDO, CDDIV4_DIVCTL1, dtable_2_4),
DEF_DDIV(".pllvdo_cru3", CLK_PLLVDO_CRU3, CLK_PLLVDO, CDDIV4_DIVCTL2, dtable_2_4),
+ DEF_DDIV(".pllgpu_gear", CLK_PLLGPU_GEAR, CLK_PLLGPU, CDDIV3_DIVCTL1, dtable_2_64),
+
/* Core Clocks */
DEF_FIXED("sys_0_pclk", R9A09G057_SYS_0_PCLK, CLK_QEXTAL, 1, 1),
DEF_DDIV("ca55_0_coreclk0", R9A09G057_CA55_0_CORE_CLK0, CLK_PLLCA55,
@@ -121,6 +128,8 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
DEF_DDIV("ca55_0_coreclk3", R9A09G057_CA55_0_CORE_CLK3, CLK_PLLCA55,
CDDIV1_DIVCTL3, dtable_1_8),
DEF_FIXED("iotop_0_shclk", R9A09G057_IOTOP_0_SHCLK, CLK_PLLCM33_DIV16, 1, 1),
+ DEF_FIXED("usb2_0_clk_core0", R9A09G057_USB2_0_CLK_CORE0, CLK_QEXTAL, 1, 1),
+ DEF_FIXED("usb2_0_clk_core1", R9A09G057_USB2_0_CLK_CORE1, CLK_QEXTAL, 1, 1),
};
static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
@@ -214,6 +223,16 @@ static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
BUS_MSTOP(8, BIT(4))),
DEF_MOD("sdhi_2_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 14, 5, 14,
BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("usb2_0_u2h0_hclk", CLK_PLLDTY_DIV8, 11, 3, 5, 19,
+ BUS_MSTOP(7, BIT(7))),
+ DEF_MOD("usb2_0_u2h1_hclk", CLK_PLLDTY_DIV8, 11, 4, 5, 20,
+ BUS_MSTOP(7, BIT(8))),
+ DEF_MOD("usb2_0_u2p_exr_cpuclk", CLK_PLLDTY_ACPU_DIV4, 11, 5, 5, 21,
+ BUS_MSTOP(7, BIT(9))),
+ DEF_MOD("usb2_0_pclk_usbtst0", CLK_PLLDTY_ACPU_DIV4, 11, 6, 5, 22,
+ BUS_MSTOP(7, BIT(10))),
+ DEF_MOD("usb2_0_pclk_usbtst1", CLK_PLLDTY_ACPU_DIV4, 11, 7, 5, 23,
+ BUS_MSTOP(7, BIT(11))),
DEF_MOD("cru_0_aclk", CLK_PLLDTY_ACPU_DIV2, 13, 2, 6, 18,
BUS_MSTOP(9, BIT(4))),
DEF_MOD_NO_PM("cru_0_vclk", CLK_PLLVDO_CRU0, 13, 3, 6, 19,
@@ -238,6 +257,12 @@ static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
BUS_MSTOP(9, BIT(7))),
DEF_MOD("cru_3_pclk", CLK_PLLDTY_DIV16, 13, 13, 6, 29,
BUS_MSTOP(9, BIT(7))),
+ DEF_MOD("gpu_0_clk", CLK_PLLGPU_GEAR, 15, 0, 7, 16,
+ BUS_MSTOP(3, BIT(4))),
+ DEF_MOD("gpu_0_axi_clk", CLK_PLLDTY_ACPU_DIV2, 15, 1, 7, 17,
+ BUS_MSTOP(3, BIT(4))),
+ DEF_MOD("gpu_0_ace_clk", CLK_PLLDTY_ACPU_DIV2, 15, 2, 7, 18,
+ BUS_MSTOP(3, BIT(4))),
};
static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
@@ -275,6 +300,10 @@ static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
DEF_RST(10, 7, 4, 24), /* SDHI_0_IXRST */
DEF_RST(10, 8, 4, 25), /* SDHI_1_IXRST */
DEF_RST(10, 9, 4, 26), /* SDHI_2_IXRST */
+ DEF_RST(10, 12, 4, 29), /* USB2_0_U2H0_HRESETN */
+ DEF_RST(10, 13, 4, 30), /* USB2_0_U2H1_HRESETN */
+ DEF_RST(10, 14, 4, 31), /* USB2_0_U2P_EXL_SYSRST */
+ DEF_RST(10, 15, 5, 0), /* USB2_0_PRESETN */
DEF_RST(12, 5, 5, 22), /* CRU_0_PRESETN */
DEF_RST(12, 6, 5, 23), /* CRU_0_ARESETN */
DEF_RST(12, 7, 5, 24), /* CRU_0_S_RESETN */
@@ -287,6 +316,9 @@ static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
DEF_RST(12, 14, 5, 31), /* CRU_3_PRESETN */
DEF_RST(12, 15, 6, 0), /* CRU_3_ARESETN */
DEF_RST(13, 0, 6, 1), /* CRU_3_S_RESETN */
+ DEF_RST(13, 13, 6, 14), /* GPU_0_RESETN */
+ DEF_RST(13, 14, 6, 15), /* GPU_0_AXI_RESETN */
+ DEF_RST(13, 15, 6, 16), /* GPU_0_ACE_RESETN */
};
const struct rzv2h_cpg_info r9a09g057_cpg_info __initconst = {
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index da021ee446ec..71431970d6e6 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -27,6 +27,7 @@
#include <linux/psci.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <dt-bindings/clock/renesas-cpg-mssr.h>
@@ -204,7 +205,7 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
int error;
dev_dbg(dev, "MSTP %u%02u/%pC %s\n", reg, bit, hw->clk,
- enable ? "ON" : "OFF");
+ str_on_off(enable));
spin_lock_irqsave(&priv->rmw_lock, flags);
if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
index b91dfbfb01e3..a8628f64a03b 100644
--- a/drivers/clk/renesas/rzg2l-cpg.c
+++ b/drivers/clk/renesas/rzg2l-cpg.c
@@ -27,6 +27,7 @@
#include <linux/pm_domain.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/units.h>
#include <dt-bindings/clock/renesas-cpg-mssr.h>
@@ -1217,7 +1218,7 @@ static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
}
dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", CLK_ON_R(reg), hw->clk,
- enable ? "ON" : "OFF");
+ str_on_off(enable));
value = bitmask << 16;
if (enable)
diff --git a/drivers/clk/renesas/rzv2h-cpg.c b/drivers/clk/renesas/rzv2h-cpg.c
index 2b9771ab2b3f..bcc496e8cbcd 100644
--- a/drivers/clk/renesas/rzv2h-cpg.c
+++ b/drivers/clk/renesas/rzv2h-cpg.c
@@ -25,6 +25,7 @@
#include <linux/pm_domain.h>
#include <linux/refcount.h>
#include <linux/reset-controller.h>
+#include <linux/string_choices.h>
#include <dt-bindings/clock/renesas-cpg-mssr.h>
@@ -44,10 +45,18 @@
#define CPG_BUS_1_MSTOP (0xd00)
#define CPG_BUS_MSTOP(m) (CPG_BUS_1_MSTOP + ((m) - 1) * 4)
-#define KDIV(val) ((s16)FIELD_GET(GENMASK(31, 16), (val)))
-#define MDIV(val) FIELD_GET(GENMASK(15, 6), (val))
-#define PDIV(val) FIELD_GET(GENMASK(5, 0), (val))
-#define SDIV(val) FIELD_GET(GENMASK(2, 0), (val))
+#define CPG_PLL_STBY(x) ((x))
+#define CPG_PLL_STBY_RESETB BIT(0)
+#define CPG_PLL_STBY_RESETB_WEN BIT(16)
+#define CPG_PLL_CLK1(x) ((x) + 0x004)
+#define CPG_PLL_CLK1_KDIV(x) ((s16)FIELD_GET(GENMASK(31, 16), (x)))
+#define CPG_PLL_CLK1_MDIV(x) FIELD_GET(GENMASK(15, 6), (x))
+#define CPG_PLL_CLK1_PDIV(x) FIELD_GET(GENMASK(5, 0), (x))
+#define CPG_PLL_CLK2(x) ((x) + 0x008)
+#define CPG_PLL_CLK2_SDIV(x) FIELD_GET(GENMASK(2, 0), (x))
+#define CPG_PLL_MON(x) ((x) + 0x010)
+#define CPG_PLL_MON_RESETB BIT(0)
+#define CPG_PLL_MON_LOCK BIT(4)
#define DDIV_DIVCTL_WEN(shift) BIT((shift) + 16)
@@ -94,8 +103,7 @@ struct pll_clk {
struct rzv2h_cpg_priv *priv;
void __iomem *base;
struct clk_hw hw;
- unsigned int conf;
- unsigned int type;
+ struct pll pll;
};
#define to_pll(_hw) container_of(_hw, struct pll_clk, hw)
@@ -110,7 +118,7 @@ struct pll_clk {
* @on_index: register offset
* @on_bit: ON/MON bit
* @mon_index: monitor register offset
- * @mon_bit: montor bit
+ * @mon_bit: monitor bit
*/
struct mod_clock {
struct rzv2h_cpg_priv *priv;
@@ -140,27 +148,78 @@ struct ddiv_clk {
#define to_ddiv_clock(_div) container_of(_div, struct ddiv_clk, div)
+static int rzv2h_cpg_pll_clk_is_enabled(struct clk_hw *hw)
+{
+ struct pll_clk *pll_clk = to_pll(hw);
+ struct rzv2h_cpg_priv *priv = pll_clk->priv;
+ u32 val = readl(priv->base + CPG_PLL_MON(pll_clk->pll.offset));
+
+ /* Ensure both RESETB and LOCK bits are set */
+ return (val & (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK)) ==
+ (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK);
+}
+
+static int rzv2h_cpg_pll_clk_enable(struct clk_hw *hw)
+{
+ struct pll_clk *pll_clk = to_pll(hw);
+ struct rzv2h_cpg_priv *priv = pll_clk->priv;
+ struct pll pll = pll_clk->pll;
+ u32 stby_offset;
+ u32 mon_offset;
+ u32 val;
+ int ret;
+
+ if (rzv2h_cpg_pll_clk_is_enabled(hw))
+ return 0;
+
+ stby_offset = CPG_PLL_STBY(pll.offset);
+ mon_offset = CPG_PLL_MON(pll.offset);
+
+ writel(CPG_PLL_STBY_RESETB_WEN | CPG_PLL_STBY_RESETB,
+ priv->base + stby_offset);
+
+ /*
+	 * Ensure the PLL enters normal mode
+ *
+ * Note: There is no HW information about the worst case latency.
+ *
+ * Since this latency might depend on external crystal or PLL rate,
+ * use a "super" safe timeout value.
+ */
+ ret = readl_poll_timeout_atomic(priv->base + mon_offset, val,
+ (val & (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK)) ==
+ (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK), 200, 2000);
+ if (ret)
+ dev_err(priv->dev, "Failed to enable PLL 0x%x/%pC\n",
+ stby_offset, hw->clk);
+
+ return ret;
+}
+
static unsigned long rzv2h_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct pll_clk *pll_clk = to_pll(hw);
struct rzv2h_cpg_priv *priv = pll_clk->priv;
+ struct pll pll = pll_clk->pll;
unsigned int clk1, clk2;
u64 rate;
- if (!PLL_CLK_ACCESS(pll_clk->conf))
+ if (!pll.has_clkn)
return 0;
- clk1 = readl(priv->base + PLL_CLK1_OFFSET(pll_clk->conf));
- clk2 = readl(priv->base + PLL_CLK2_OFFSET(pll_clk->conf));
+ clk1 = readl(priv->base + CPG_PLL_CLK1(pll.offset));
+ clk2 = readl(priv->base + CPG_PLL_CLK2(pll.offset));
- rate = mul_u64_u32_shr(parent_rate, (MDIV(clk1) << 16) + KDIV(clk1),
- 16 + SDIV(clk2));
+ rate = mul_u64_u32_shr(parent_rate, (CPG_PLL_CLK1_MDIV(clk1) << 16) +
+ CPG_PLL_CLK1_KDIV(clk1), 16 + CPG_PLL_CLK2_SDIV(clk2));
- return DIV_ROUND_CLOSEST_ULL(rate, PDIV(clk1));
+ return DIV_ROUND_CLOSEST_ULL(rate, CPG_PLL_CLK1_PDIV(clk1));
}
static const struct clk_ops rzv2h_cpg_pll_ops = {
+ .is_enabled = rzv2h_cpg_pll_clk_is_enabled,
+ .enable = rzv2h_cpg_pll_clk_enable,
.recalc_rate = rzv2h_cpg_pll_clk_recalc_rate,
};
@@ -193,10 +252,9 @@ rzv2h_cpg_pll_clk_register(const struct cpg_core_clk *core,
init.num_parents = 1;
pll_clk->hw.init = &init;
- pll_clk->conf = core->cfg.conf;
+ pll_clk->pll = core->cfg.pll;
pll_clk->base = base;
pll_clk->priv = priv;
- pll_clk->type = core->type;
ret = devm_clk_hw_register(dev, &pll_clk->hw);
if (ret)
@@ -241,6 +299,9 @@ static inline int rzv2h_cpg_wait_ddiv_clk_update_done(void __iomem *base, u8 mon
u32 bitmask = BIT(mon);
u32 val;
+ if (mon == CSDIV_NO_MON)
+ return 0;
+
return readl_poll_timeout_atomic(base + CPG_CLKSTATUS0, val, !(val & bitmask), 10, 200);
}
@@ -272,12 +333,6 @@ static int rzv2h_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
writel(val, divider->reg);
ret = rzv2h_cpg_wait_ddiv_clk_update_done(priv->base, ddiv->mon);
- if (ret)
- goto ddiv_timeout;
-
- spin_unlock_irqrestore(divider->lock, flags);
-
- return 0;
ddiv_timeout:
spin_unlock_irqrestore(divider->lock, flags);
@@ -320,7 +375,10 @@ rzv2h_cpg_ddiv_clk_register(const struct cpg_core_clk *core,
return ERR_PTR(-ENOMEM);
init.name = core->name;
- init.ops = &rzv2h_ddiv_clk_divider_ops;
+ if (cfg_ddiv.no_rmw)
+ init.ops = &clk_divider_ops;
+ else
+ init.ops = &rzv2h_ddiv_clk_divider_ops;
init.parent_names = &parent_name;
init.num_parents = 1;
@@ -342,6 +400,24 @@ rzv2h_cpg_ddiv_clk_register(const struct cpg_core_clk *core,
return div->hw.clk;
}
+static struct clk * __init
+rzv2h_cpg_mux_clk_register(const struct cpg_core_clk *core,
+ struct rzv2h_cpg_priv *priv)
+{
+ struct smuxed mux = core->cfg.smux;
+ const struct clk_hw *clk_hw;
+
+ clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
+ core->parent_names, core->num_parents,
+ core->flag, priv->base + mux.offset,
+ mux.shift, mux.width,
+ core->mux_flags, &priv->rmw_lock);
+ if (IS_ERR(clk_hw))
+ return ERR_CAST(clk_hw);
+
+ return clk_hw->clk;
+}
+
static struct clk
*rzv2h_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
void *data)
@@ -426,6 +502,9 @@ rzv2h_cpg_register_core_clk(const struct cpg_core_clk *core,
case CLK_TYPE_DDIV:
clk = rzv2h_cpg_ddiv_clk_register(core, priv);
break;
+ case CLK_TYPE_SMUX:
+ clk = rzv2h_cpg_mux_clk_register(core, priv);
+ break;
default:
goto fail;
}
@@ -494,11 +573,14 @@ static int rzv2h_mod_clock_is_enabled(struct clk_hw *hw)
if (clock->mon_index >= 0) {
offset = GET_CLK_MON_OFFSET(clock->mon_index);
bitmask = BIT(clock->mon_bit);
- } else {
- offset = GET_CLK_ON_OFFSET(clock->on_index);
- bitmask = BIT(clock->on_bit);
+
+ if (!(readl(priv->base + offset) & bitmask))
+ return 0;
}
+ offset = GET_CLK_ON_OFFSET(clock->on_index);
+ bitmask = BIT(clock->on_bit);
+
return readl(priv->base + offset) & bitmask;
}
@@ -514,7 +596,7 @@ static int rzv2h_mod_clock_endisable(struct clk_hw *hw, bool enable)
int error;
dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", reg, hw->clk,
- enable ? "ON" : "OFF");
+ str_on_off(enable));
if (enabled == enable)
return 0;
@@ -658,8 +740,8 @@ fail:
mod->name, PTR_ERR(clk));
}
-static int rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
- unsigned long id)
+static int __rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
{
struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
unsigned int reg = GET_RST_OFFSET(priv->resets[id].reset_index);
@@ -667,35 +749,31 @@ static int rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
u8 monbit = priv->resets[id].mon_bit;
u32 value = mask << 16;
- dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, reg);
+ dev_dbg(rcdev->dev, "%s id:%ld offset:0x%x\n",
+ assert ? "assert" : "deassert", id, reg);
+ if (!assert)
+ value |= mask;
writel(value, priv->base + reg);
reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
mask = BIT(monbit);
return readl_poll_timeout_atomic(priv->base + reg, value,
- value & mask, 10, 200);
+ assert ? (value & mask) : !(value & mask),
+ 10, 200);
+}
+
+static int rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return __rzv2h_cpg_assert(rcdev, id, true);
}
static int rzv2h_cpg_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
- struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
- unsigned int reg = GET_RST_OFFSET(priv->resets[id].reset_index);
- u32 mask = BIT(priv->resets[id].reset_bit);
- u8 monbit = priv->resets[id].mon_bit;
- u32 value = (mask << 16) | mask;
-
- dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id, reg);
-
- writel(value, priv->base + reg);
-
- reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
- mask = BIT(monbit);
-
- return readl_poll_timeout_atomic(priv->base + reg, value,
- !(value & mask), 10, 200);
+ return __rzv2h_cpg_assert(rcdev, id, false);
}
static int rzv2h_cpg_reset(struct reset_controller_dev *rcdev,
@@ -967,18 +1045,24 @@ static int __init rzv2h_cpg_probe(struct platform_device *pdev)
}
static const struct of_device_id rzv2h_cpg_match[] = {
-#ifdef CONFIG_CLK_R9A09G057
- {
- .compatible = "renesas,r9a09g057-cpg",
- .data = &r9a09g057_cpg_info,
- },
-#endif
#ifdef CONFIG_CLK_R9A09G047
{
.compatible = "renesas,r9a09g047-cpg",
.data = &r9a09g047_cpg_info,
},
#endif
+#ifdef CONFIG_CLK_R9A09G056
+ {
+ .compatible = "renesas,r9a09g056-cpg",
+ .data = &r9a09g056_cpg_info,
+ },
+#endif
+#ifdef CONFIG_CLK_R9A09G057
+ {
+ .compatible = "renesas,r9a09g057-cpg",
+ .data = &r9a09g057_cpg_info,
+ },
+#endif
{ /* sentinel */ }
};
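The reworked rzv2h_cpg_pll_clk_recalc_rate() above evaluates rate = parent * (MDIV + KDIV / 2^16) / (2^SDIV * PDIV). A minimal userspace sketch of that arithmetic, using made-up divider values rather than values read from the CLK1/CLK2 registers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t parent_rate = 24000000;		/* QEXTAL, assumed 24 MHz */
	uint32_t mdiv = 100, pdiv = 1, sdiv = 1;	/* illustrative values only */
	int16_t kdiv = 0;				/* signed fractional part */

	/* mul_u64_u32_shr(parent_rate, (MDIV << 16) + KDIV, 16 + SDIV) */
	uint64_t rate = (parent_rate * (((uint64_t)mdiv << 16) + kdiv)) >> (16 + sdiv);

	/* DIV_ROUND_CLOSEST_ULL(rate, PDIV) */
	rate = (rate + pdiv / 2) / pdiv;

	printf("%llu Hz\n", (unsigned long long)rate);	/* prints 1200000000 */
	return 0;
}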
diff --git a/drivers/clk/renesas/rzv2h-cpg.h b/drivers/clk/renesas/rzv2h-cpg.h
index 576a070763cb..9104b1cd276c 100644
--- a/drivers/clk/renesas/rzv2h-cpg.h
+++ b/drivers/clk/renesas/rzv2h-cpg.h
@@ -11,20 +11,51 @@
#include <linux/bitfield.h>
/**
+ * struct pll - Structure for PLL configuration
+ *
+ * @offset: STBY register offset
+ * @has_clkn: Flag to indicate if CLK1/2 are accessible or not
+ */
+struct pll {
+ unsigned int offset:9;
+ unsigned int has_clkn:1;
+};
+
+#define PLL_PACK(_offset, _has_clkn) \
+ ((struct pll){ \
+ .offset = _offset, \
+ .has_clkn = _has_clkn \
+ })
+
+#define PLLCA55 PLL_PACK(0x60, 1)
+#define PLLGPU PLL_PACK(0x120, 1)
+
+/**
* struct ddiv - Structure for dynamic switching divider
*
* @offset: register offset
* @shift: position of the divider bit
* @width: width of the divider
* @monbit: monitor bit in CPG_CLKSTATUS0 register
+ * @no_rmw: flag to indicate whether the divider is programmed without
+ * read-modify-write (1: no RMW, 0: RMW)
*/
struct ddiv {
unsigned int offset:11;
unsigned int shift:4;
unsigned int width:4;
unsigned int monbit:5;
+ unsigned int no_rmw:1;
};
+/*
+ * On RZ/V2H(P), the dynamic divider clock supports up to 19 monitor bits,
+ * while on RZ/G3E, it supports up to 16 monitor bits. Use the maximum value
+ * `0x1f` to indicate that monitor bits are not supported for static divider
+ * clocks.
+ */
+#define CSDIV_NO_MON (0x1f)
+
#define DDIV_PACK(_offset, _shift, _width, _monbit) \
((struct ddiv){ \
.offset = _offset, \
@@ -33,10 +64,41 @@ struct ddiv {
.monbit = _monbit \
})
+#define DDIV_PACK_NO_RMW(_offset, _shift, _width, _monbit) \
+ ((struct ddiv){ \
+ .offset = (_offset), \
+ .shift = (_shift), \
+ .width = (_width), \
+ .monbit = (_monbit), \
+ .no_rmw = 1 \
+ })
+
+/**
+ * struct smuxed - Structure for static muxed clocks
+ *
+ * @offset: register offset
+ * @shift: position of the mux selection field
+ * @width: width of the mux selection field
+ */
+struct smuxed {
+ unsigned int offset:11;
+ unsigned int shift:4;
+ unsigned int width:4;
+};
+
+#define SMUX_PACK(_offset, _shift, _width) \
+ ((struct smuxed){ \
+ .offset = (_offset), \
+ .shift = (_shift), \
+ .width = (_width), \
+ })
+
+#define CPG_SSEL1 (0x304)
#define CPG_CDDIV0 (0x400)
#define CPG_CDDIV1 (0x404)
#define CPG_CDDIV3 (0x40C)
#define CPG_CDDIV4 (0x410)
+#define CPG_CSDIV0 (0x500)
#define CDDIV0_DIVCTL1 DDIV_PACK(CPG_CDDIV0, 4, 3, 1)
#define CDDIV0_DIVCTL2 DDIV_PACK(CPG_CDDIV0, 8, 3, 2)
@@ -44,12 +106,18 @@ struct ddiv {
#define CDDIV1_DIVCTL1 DDIV_PACK(CPG_CDDIV1, 4, 2, 5)
#define CDDIV1_DIVCTL2 DDIV_PACK(CPG_CDDIV1, 8, 2, 6)
#define CDDIV1_DIVCTL3 DDIV_PACK(CPG_CDDIV1, 12, 2, 7)
+#define CDDIV3_DIVCTL1 DDIV_PACK(CPG_CDDIV3, 4, 3, 13)
#define CDDIV3_DIVCTL2 DDIV_PACK(CPG_CDDIV3, 8, 3, 14)
#define CDDIV3_DIVCTL3 DDIV_PACK(CPG_CDDIV3, 12, 1, 15)
#define CDDIV4_DIVCTL0 DDIV_PACK(CPG_CDDIV4, 0, 1, 16)
#define CDDIV4_DIVCTL1 DDIV_PACK(CPG_CDDIV4, 4, 1, 17)
#define CDDIV4_DIVCTL2 DDIV_PACK(CPG_CDDIV4, 8, 1, 18)
+#define CSDIV0_DIVCTL3 DDIV_PACK_NO_RMW(CPG_CSDIV0, 12, 2, CSDIV_NO_MON)
+
+#define SSEL1_SELCTL2 SMUX_PACK(CPG_SSEL1, 8, 1)
+#define SSEL1_SELCTL3 SMUX_PACK(CPG_SSEL1, 12, 1)
+
#define BUS_MSTOP_IDX_MASK GENMASK(31, 16)
#define BUS_MSTOP_BITS_MASK GENMASK(15, 0)
#define BUS_MSTOP(idx, mask) (FIELD_PREP_CONST(BUS_MSTOP_IDX_MASK, (idx)) | \
@@ -74,8 +142,13 @@ struct cpg_core_clk {
union {
unsigned int conf;
struct ddiv ddiv;
+ struct pll pll;
+ struct smuxed smux;
} cfg;
const struct clk_div_table *dtable;
+ const char * const *parent_names;
+ unsigned int num_parents;
+ u8 mux_flags;
u32 flag;
};
@@ -85,20 +158,15 @@ enum clk_types {
CLK_TYPE_FF, /* Fixed Factor Clock */
CLK_TYPE_PLL,
CLK_TYPE_DDIV, /* Dynamic Switching Divider */
+ CLK_TYPE_SMUX, /* Static Mux */
};
-/* BIT(31) indicates if CLK1/2 are accessible or not */
-#define PLL_CONF(n) (BIT(31) | ((n) & ~GENMASK(31, 16)))
-#define PLL_CLK_ACCESS(n) ((n) & BIT(31) ? 1 : 0)
-#define PLL_CLK1_OFFSET(n) ((n) & ~GENMASK(31, 16))
-#define PLL_CLK2_OFFSET(n) (((n) & ~GENMASK(31, 16)) + (0x4))
-
#define DEF_TYPE(_name, _id, _type...) \
{ .name = _name, .id = _id, .type = _type }
#define DEF_BASE(_name, _id, _type, _parent...) \
DEF_TYPE(_name, _id, _type, .parent = _parent)
-#define DEF_PLL(_name, _id, _parent, _conf) \
- DEF_TYPE(_name, _id, CLK_TYPE_PLL, .parent = _parent, .cfg.conf = _conf)
+#define DEF_PLL(_name, _id, _parent, _pll_packed) \
+ DEF_TYPE(_name, _id, CLK_TYPE_PLL, .parent = _parent, .cfg.pll = _pll_packed)
#define DEF_INPUT(_name, _id) \
DEF_TYPE(_name, _id, CLK_TYPE_IN)
#define DEF_FIXED(_name, _id, _parent, _mult, _div) \
@@ -109,6 +177,15 @@ enum clk_types {
.parent = _parent, \
.dtable = _dtable, \
.flag = CLK_DIVIDER_HIWORD_MASK)
+#define DEF_CSDIV(_name, _id, _parent, _ddiv_packed, _dtable) \
+ DEF_DDIV(_name, _id, _parent, _ddiv_packed, _dtable)
+#define DEF_SMUX(_name, _id, _smux_packed, _parent_names) \
+ DEF_TYPE(_name, _id, CLK_TYPE_SMUX, \
+ .cfg.smux = _smux_packed, \
+ .parent_names = _parent_names, \
+ .num_parents = ARRAY_SIZE(_parent_names), \
+ .flag = CLK_SET_RATE_PARENT, \
+ .mux_flags = CLK_MUX_HIWORD_MASK)
/**
* struct rzv2h_mod_clk - Module Clocks definitions
@@ -221,6 +298,7 @@ struct rzv2h_cpg_info {
};
extern const struct rzv2h_cpg_info r9a09g047_cpg_info;
+extern const struct rzv2h_cpg_info r9a09g056_cpg_info;
extern const struct rzv2h_cpg_info r9a09g057_cpg_info;
#endif /* __RENESAS_RZV2H_CPG_H__ */
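As a standalone illustration (not kernel code) of the packed PLL descriptor introduced here, the sketch below reuses PLL_PACK() and the +0x004/+0x010 spacing encoded in CPG_PLL_CLK1()/CPG_PLL_MON() to show which registers the PLLCA55 definition points at:

#include <stdio.h>

struct pll {
	unsigned int offset:9;
	unsigned int has_clkn:1;
};

#define PLL_PACK(_offset, _has_clkn) \
	((struct pll){ .offset = _offset, .has_clkn = _has_clkn })

#define PLLCA55 PLL_PACK(0x60, 1)

int main(void)
{
	struct pll p = PLLCA55;

	/* STBY at the base offset, CLK1 at +0x004, MON at +0x010 */
	printf("STBY=0x%03x CLK1=0x%03x MON=0x%03x has_clkn=%u\n",
	       p.offset, p.offset + 0x004, p.offset + 0x010, p.has_clkn);
	return 0;
}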
diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile
index e8ece20aebfd..c281a9738d9f 100644
--- a/drivers/clk/rockchip/Makefile
+++ b/drivers/clk/rockchip/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_COMMON_CLK_ROCKCHIP) += clk-rockchip.o
clk-rockchip-y += clk.o
clk-rockchip-y += clk-pll.o
clk-rockchip-y += clk-cpu.o
+clk-rockchip-y += clk-gate-grf.o
clk-rockchip-y += clk-half-divider.o
clk-rockchip-y += clk-inverter.o
clk-rockchip-y += clk-mmc-phase.o
diff --git a/drivers/clk/rockchip/clk-gate-grf.c b/drivers/clk/rockchip/clk-gate-grf.c
new file mode 100644
index 000000000000..8122f471f391
--- /dev/null
+++ b/drivers/clk/rockchip/clk-gate-grf.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2025 Collabora Ltd.
+ * Author: Nicolas Frattaroli <nicolas.frattaroli@collabora.com>
+ *
+ * Certain clocks on Rockchip are "gated" behind an additional register bit
+ * write in a GRF register, such as the SAI MCLKs on RK3576. This code
+ * implements a clock driver for these types of gates, based on regmaps.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include "clk.h"
+
+struct rockchip_gate_grf {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ unsigned int reg;
+ unsigned int shift;
+ u8 flags;
+};
+
+#define to_gate_grf(_hw) container_of(_hw, struct rockchip_gate_grf, hw)
+
+static int rockchip_gate_grf_enable(struct clk_hw *hw)
+{
+ struct rockchip_gate_grf *gate = to_gate_grf(hw);
+ u32 val = !(gate->flags & CLK_GATE_SET_TO_DISABLE) ? BIT(gate->shift) : 0;
+ u32 hiword = ((gate->flags & CLK_GATE_HIWORD_MASK) ? 1 : 0) << (gate->shift + 16);
+ int ret;
+
+ ret = regmap_update_bits(gate->regmap, gate->reg,
+ hiword | BIT(gate->shift), hiword | val);
+
+ return ret;
+}
+
+static void rockchip_gate_grf_disable(struct clk_hw *hw)
+{
+ struct rockchip_gate_grf *gate = to_gate_grf(hw);
+ u32 val = !(gate->flags & CLK_GATE_SET_TO_DISABLE) ? 0 : BIT(gate->shift);
+ u32 hiword = ((gate->flags & CLK_GATE_HIWORD_MASK) ? 1 : 0) << (gate->shift + 16);
+
+ regmap_update_bits(gate->regmap, gate->reg,
+ hiword | BIT(gate->shift), hiword | val);
+}
+
+static int rockchip_gate_grf_is_enabled(struct clk_hw *hw)
+{
+ struct rockchip_gate_grf *gate = to_gate_grf(hw);
+ bool invert = !!(gate->flags & CLK_GATE_SET_TO_DISABLE);
+ int ret;
+
+ ret = regmap_test_bits(gate->regmap, gate->reg, BIT(gate->shift));
+ if (ret < 0)
+ ret = 0;
+
+ return invert ? 1 - ret : ret;
+
+}
+
+static const struct clk_ops rockchip_gate_grf_ops = {
+ .enable = rockchip_gate_grf_enable,
+ .disable = rockchip_gate_grf_disable,
+ .is_enabled = rockchip_gate_grf_is_enabled,
+};
+
+struct clk *rockchip_clk_register_gate_grf(const char *name,
+ const char *parent_name, unsigned long flags,
+ struct regmap *regmap, unsigned int reg, unsigned int shift,
+ u8 gate_flags)
+{
+ struct rockchip_gate_grf *gate;
+ struct clk_init_data init;
+ struct clk *clk;
+
+ if (IS_ERR(regmap)) {
+ pr_err("%s: regmap not available\n", __func__);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.flags = flags;
+ init.num_parents = parent_name ? 1 : 0;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.ops = &rockchip_gate_grf_ops;
+
+ gate->hw.init = &init;
+ gate->regmap = regmap;
+ gate->reg = reg;
+ gate->shift = shift;
+ gate->flags = gate_flags;
+
+ clk = clk_register(NULL, &gate->hw);
+ if (IS_ERR(clk))
+ kfree(gate);
+
+ return clk;
+}
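To make the GRF access pattern concrete: for a gate without CLK_GATE_SET_TO_DISABLE and with CLK_GATE_HIWORD_MASK, enable and disable always pair the gate bit with its write-enable bit 16 positions higher. A small standalone sketch of the mask/value pairs passed to regmap_update_bits(), with shift = 0 loosely matching the mclk_sai0_to_io entry added to clk-rk3576.c further down (purely illustrative):

#include <stdio.h>

#define BIT(n)	(1U << (n))

int main(void)
{
	unsigned int shift = 0;			/* gate bit position in the GRF register */
	unsigned int hiword = 1U << (shift + 16);	/* hiword write-enable bit */

	/* arguments handed to regmap_update_bits(grf, reg, mask, val) */
	printf("enable : mask=0x%08x val=0x%08x\n",
	       hiword | BIT(shift), hiword | BIT(shift));
	printf("disable: mask=0x%08x val=0x%08x\n",
	       hiword | BIT(shift), hiword);
	return 0;
}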
diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
index 91012078681b..b3ed8e7523e5 100644
--- a/drivers/clk/rockchip/clk-mmc-phase.c
+++ b/drivers/clk/rockchip/clk-mmc-phase.c
@@ -9,11 +9,14 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/regmap.h>
#include "clk.h"
struct rockchip_mmc_clock {
struct clk_hw hw;
void __iomem *reg;
+ struct regmap *grf;
+ int grf_reg;
int shift;
int cached_phase;
struct notifier_block clk_rate_change_nb;
@@ -54,7 +57,12 @@ static int rockchip_mmc_get_phase(struct clk_hw *hw)
if (!rate)
return 0;
- raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);
+ if (mmc_clock->grf)
+ regmap_read(mmc_clock->grf, mmc_clock->grf_reg, &raw_value);
+ else
+ raw_value = readl(mmc_clock->reg);
+
+ raw_value >>= mmc_clock->shift;
degrees = (raw_value & ROCKCHIP_MMC_DEGREE_MASK) * 90;
@@ -134,8 +142,12 @@ static int rockchip_mmc_set_phase(struct clk_hw *hw, int degrees)
raw_value = delay_num ? ROCKCHIP_MMC_DELAY_SEL : 0;
raw_value |= delay_num << ROCKCHIP_MMC_DELAYNUM_OFFSET;
raw_value |= nineties;
- writel(HIWORD_UPDATE(raw_value, 0x07ff, mmc_clock->shift),
- mmc_clock->reg);
+ raw_value = HIWORD_UPDATE(raw_value, 0x07ff, mmc_clock->shift);
+
+ if (mmc_clock->grf)
+ regmap_write(mmc_clock->grf, mmc_clock->grf_reg, raw_value);
+ else
+ writel(raw_value, mmc_clock->reg);
pr_debug("%s->set_phase(%d) delay_nums=%u reg[0x%p]=0x%03x actual_degrees=%d\n",
clk_hw_get_name(hw), degrees, delay_num,
@@ -189,7 +201,9 @@ static int rockchip_mmc_clk_rate_notify(struct notifier_block *nb,
struct clk *rockchip_clk_register_mmc(const char *name,
const char *const *parent_names, u8 num_parents,
- void __iomem *reg, int shift)
+ void __iomem *reg,
+ struct regmap *grf, int grf_reg,
+ int shift)
{
struct clk_init_data init;
struct rockchip_mmc_clock *mmc_clock;
@@ -208,6 +222,8 @@ struct clk *rockchip_clk_register_mmc(const char *name,
mmc_clock->hw.init = &init;
mmc_clock->reg = reg;
+ mmc_clock->grf = grf;
+ mmc_clock->grf_reg = grf_reg;
mmc_clock->shift = shift;
clk = clk_register(NULL, &mmc_clock->hw);
diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
index 2c2abb3b4210..af74439a7457 100644
--- a/drivers/clk/rockchip/clk-pll.c
+++ b/drivers/clk/rockchip/clk-pll.c
@@ -1027,16 +1027,6 @@ static int rockchip_rk3588_pll_is_enabled(struct clk_hw *hw)
return !(pllcon & RK3588_PLLCON1_PWRDOWN);
}
-static int rockchip_rk3588_pll_init(struct clk_hw *hw)
-{
- struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
-
- if (!(pll->flags & ROCKCHIP_PLL_SYNC_RATE))
- return 0;
-
- return 0;
-}
-
static const struct clk_ops rockchip_rk3588_pll_clk_norate_ops = {
.recalc_rate = rockchip_rk3588_pll_recalc_rate,
.enable = rockchip_rk3588_pll_enable,
@@ -1051,7 +1041,6 @@ static const struct clk_ops rockchip_rk3588_pll_clk_ops = {
.enable = rockchip_rk3588_pll_enable,
.disable = rockchip_rk3588_pll_disable,
.is_enabled = rockchip_rk3588_pll_is_enabled,
- .init = rockchip_rk3588_pll_init,
};
/*
diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c
index d341ce0708aa..df9330958c83 100644
--- a/drivers/clk/rockchip/clk-rk3036.c
+++ b/drivers/clk/rockchip/clk-rk3036.c
@@ -123,6 +123,7 @@ PNAME(mux_timer_p) = { "xin24m", "pclk_peri_src" };
PNAME(mux_pll_src_apll_dpll_gpll_usb480m_p) = { "apll", "dpll", "gpll", "usb480m" };
PNAME(mux_pll_src_dmyapll_dpll_gpll_xin24_p) = { "dummy_apll", "dpll", "gpll", "xin24m" };
+PNAME(mux_usb480m_p) = { "usb480m_phy", "xin24m" };
PNAME(mux_mmc_src_p) = { "apll", "dpll", "gpll", "xin24m" };
PNAME(mux_i2s_pre_p) = { "i2s_src", "i2s_frac", "ext_i2s", "xin12m" };
PNAME(mux_i2s_clkout_p) = { "i2s_pre", "xin12m" };
@@ -423,6 +424,9 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 9, GFLAGS),
GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 10, GFLAGS),
GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 11, GFLAGS),
+
+ MUX(SCLK_USB480M, "usb480m", mux_usb480m_p, CLK_SET_RATE_PARENT,
+ RK2928_MISC_CON, 15, 1, MFLAGS),
};
static const char *const rk3036_critical_clocks[] __initconst = {
@@ -431,6 +435,7 @@ static const char *const rk3036_critical_clocks[] __initconst = {
"hclk_peri",
"pclk_peri",
"pclk_ddrupctl",
+ "ddrphy",
};
static void __init rk3036_clk_init(struct device_node *np)
@@ -438,7 +443,6 @@ static void __init rk3036_clk_init(struct device_node *np)
struct rockchip_clk_provider *ctx;
unsigned long clk_nr_clks;
void __iomem *reg_base;
- struct clk *clk;
reg_base = of_iomap(np, 0);
if (!reg_base) {
@@ -462,11 +466,6 @@ static void __init rk3036_clk_init(struct device_node *np)
return;
}
- clk = clk_register_fixed_factor(NULL, "usb480m", "xin24m", 0, 20, 1);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock usb480m: %ld\n",
- __func__, PTR_ERR(clk));
-
rockchip_clk_register_plls(ctx, rk3036_pll_clks,
ARRAY_SIZE(rk3036_pll_clks),
RK3036_GRF_SOC_STATUS0);
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index 90d329216064..0a1e017df7c6 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -418,7 +418,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
RK3288_CLKSEL_CON(32), 14, 2, MFLAGS, 8, 5, DFLAGS,
RK3288_CLKGATE_CON(3), 11, GFLAGS),
MUXGRF(0, "aclk_vcodec_pre", mux_aclk_vcodec_pre_p, CLK_SET_RATE_PARENT,
- RK3288_GRF_SOC_CON(0), 7, 1, MFLAGS),
+ RK3288_GRF_SOC_CON(0), 7, 1, MFLAGS, grf_type_sys),
GATE(ACLK_VCODEC, "aclk_vcodec", "aclk_vcodec_pre", 0,
RK3288_CLKGATE_CON(9), 0, GFLAGS),
diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c
index cf60fcf2fa5c..cd5f65b6cdf5 100644
--- a/drivers/clk/rockchip/clk-rk3328.c
+++ b/drivers/clk/rockchip/clk-rk3328.c
@@ -677,9 +677,9 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
RK3328_CLKSEL_CON(27), 15, 1, MFLAGS, 8, 5, DFLAGS,
RK3328_CLKGATE_CON(3), 5, GFLAGS),
MUXGRF(SCLK_MAC2IO, "clk_mac2io", mux_mac2io_src_p, CLK_SET_RATE_NO_REPARENT,
- RK3328_GRF_MAC_CON1, 10, 1, MFLAGS),
+ RK3328_GRF_MAC_CON1, 10, 1, MFLAGS, grf_type_sys),
MUXGRF(SCLK_MAC2IO_EXT, "clk_mac2io_ext", mux_mac2io_ext_p, CLK_SET_RATE_NO_REPARENT,
- RK3328_GRF_SOC_CON4, 14, 1, MFLAGS),
+ RK3328_GRF_SOC_CON4, 14, 1, MFLAGS, grf_type_sys),
COMPOSITE(SCLK_MAC2PHY_SRC, "clk_mac2phy_src", mux_2plls_p, 0,
RK3328_CLKSEL_CON(26), 7, 1, MFLAGS, 0, 5, DFLAGS,
@@ -692,7 +692,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
RK3328_CLKSEL_CON(26), 8, 2, DFLAGS,
RK3328_CLKGATE_CON(9), 2, GFLAGS),
MUXGRF(SCLK_MAC2PHY, "clk_mac2phy", mux_mac2phy_src_p, CLK_SET_RATE_NO_REPARENT,
- RK3328_GRF_MAC_CON2, 10, 1, MFLAGS),
+ RK3328_GRF_MAC_CON2, 10, 1, MFLAGS, grf_type_sys),
FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
diff --git a/drivers/clk/rockchip/clk-rk3528.c b/drivers/clk/rockchip/clk-rk3528.c
index b8b577b902a0..a5ff64b93f8f 100644
--- a/drivers/clk/rockchip/clk-rk3528.c
+++ b/drivers/clk/rockchip/clk-rk3528.c
@@ -10,6 +10,9 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/minmax.h>
+#include <linux/slab.h>
#include <dt-bindings/clock/rockchip,rk3528-cru.h>
@@ -1061,23 +1064,65 @@ static struct rockchip_clk_branch rk3528_clk_branches[] __initdata = {
0, 1, 1),
};
+static struct rockchip_clk_branch rk3528_vo_clk_branches[] __initdata = {
+ MMC_GRF(SCLK_SDMMC_DRV, "sdmmc_drv", "cclk_src_sdmmc0",
+ RK3528_SDMMC_CON(0), 1, grf_type_vo),
+ MMC_GRF(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "cclk_src_sdmmc0",
+ RK3528_SDMMC_CON(1), 1, grf_type_vo),
+};
+
+static struct rockchip_clk_branch rk3528_vpu_clk_branches[] __initdata = {
+ MMC_GRF(SCLK_SDIO0_DRV, "sdio0_drv", "cclk_src_sdio0",
+ RK3528_SDIO0_CON(0), 1, grf_type_vpu),
+ MMC_GRF(SCLK_SDIO0_SAMPLE, "sdio0_sample", "cclk_src_sdio0",
+ RK3528_SDIO0_CON(1), 1, grf_type_vpu),
+ MMC_GRF(SCLK_SDIO1_DRV, "sdio1_drv", "cclk_src_sdio1",
+ RK3528_SDIO1_CON(0), 1, grf_type_vpu),
+ MMC_GRF(SCLK_SDIO1_SAMPLE, "sdio1_sample", "cclk_src_sdio1",
+ RK3528_SDIO1_CON(1), 1, grf_type_vpu),
+};
+
static int __init clk_rk3528_probe(struct platform_device *pdev)
{
- struct rockchip_clk_provider *ctx;
+ unsigned long nr_vpu_branches = ARRAY_SIZE(rk3528_vpu_clk_branches);
+ unsigned long nr_vo_branches = ARRAY_SIZE(rk3528_vo_clk_branches);
+ unsigned long nr_branches = ARRAY_SIZE(rk3528_clk_branches);
+ unsigned long nr_clks, nr_vo_clks, nr_vpu_clks;
+ struct rockchip_aux_grf *vo_grf_e, *vpu_grf_e;
+ struct regmap *vo_grf, *vpu_grf;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- unsigned long nr_branches = ARRAY_SIZE(rk3528_clk_branches);
- unsigned long nr_clks;
+ struct rockchip_clk_provider *ctx;
void __iomem *reg_base;
- nr_clks = rockchip_clk_find_max_clk_id(rk3528_clk_branches,
- nr_branches) + 1;
-
reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg_base))
return dev_err_probe(dev, PTR_ERR(reg_base),
"could not map cru region");
+ nr_clks = rockchip_clk_find_max_clk_id(rk3528_clk_branches,
+ nr_branches) + 1;
+
+ vo_grf = syscon_regmap_lookup_by_compatible("rockchip,rk3528-vo-grf");
+ if (!IS_ERR(vo_grf)) {
+ nr_vo_clks = rockchip_clk_find_max_clk_id(rk3528_vo_clk_branches,
+ nr_vo_branches) + 1;
+ nr_clks = max(nr_clks, nr_vo_clks);
+ } else if (PTR_ERR(vo_grf) != -ENODEV) {
+ return dev_err_probe(dev, PTR_ERR(vo_grf),
+ "failed to look up VO GRF\n");
+ }
+
+ vpu_grf = syscon_regmap_lookup_by_compatible("rockchip,rk3528-vpu-grf");
+ if (!IS_ERR(vpu_grf)) {
+ nr_vpu_clks = rockchip_clk_find_max_clk_id(rk3528_vpu_clk_branches,
+ nr_vpu_branches) + 1;
+ nr_clks = max(nr_clks, nr_vpu_clks);
+ } else if (PTR_ERR(vpu_grf) != -ENODEV) {
+ return dev_err_probe(dev, PTR_ERR(vpu_grf),
+ "failed to look up VPU GRF\n");
+ }
+
ctx = rockchip_clk_init(np, reg_base, nr_clks);
if (IS_ERR(ctx))
return dev_err_probe(dev, PTR_ERR(ctx),
@@ -1092,6 +1137,32 @@ static int __init clk_rk3528_probe(struct platform_device *pdev)
ARRAY_SIZE(rk3528_cpuclk_rates));
rockchip_clk_register_branches(ctx, rk3528_clk_branches, nr_branches);
+ if (!IS_ERR(vo_grf)) {
+ vo_grf_e = devm_kzalloc(dev, sizeof(*vo_grf_e), GFP_KERNEL);
+ if (!vo_grf_e)
+ return -ENOMEM;
+
+ vo_grf_e->grf = vo_grf;
+ vo_grf_e->type = grf_type_vo;
+ hash_add(ctx->aux_grf_table, &vo_grf_e->node, grf_type_vo);
+
+ rockchip_clk_register_branches(ctx, rk3528_vo_clk_branches,
+ nr_vo_branches);
+ }
+
+ if (!IS_ERR(vpu_grf)) {
+ vpu_grf_e = devm_kzalloc(dev, sizeof(*vpu_grf_e), GFP_KERNEL);
+ if (!vpu_grf_e)
+ return -ENOMEM;
+
+ vpu_grf_e->grf = vpu_grf;
+ vpu_grf_e->type = grf_type_vpu;
+ hash_add(ctx->aux_grf_table, &vpu_grf_e->node, grf_type_vpu);
+
+ rockchip_clk_register_branches(ctx, rk3528_vpu_clk_branches,
+ nr_vpu_branches);
+ }
+
rk3528_rst_init(np, reg_base);
rockchip_register_restart_notifier(ctx, RK3528_GLB_SRST_FST, NULL);
diff --git a/drivers/clk/rockchip/clk-rk3568.c b/drivers/clk/rockchip/clk-rk3568.c
index 7d9279291e76..d48ab9d6c064 100644
--- a/drivers/clk/rockchip/clk-rk3568.c
+++ b/drivers/clk/rockchip/clk-rk3568.c
@@ -89,6 +89,7 @@ static struct rockchip_pll_rate_table rk3568_pll_rates[] = {
RK3036_PLL_RATE(96000000, 1, 96, 6, 4, 1, 0),
RK3036_PLL_RATE(78750000, 4, 315, 6, 4, 1, 0),
RK3036_PLL_RATE(74250000, 2, 99, 4, 4, 1, 0),
+ RK3036_PLL_RATE(33300000, 4, 111, 5, 4, 1, 0),
{ /* sentinel */ },
};
@@ -590,7 +591,7 @@ static struct rockchip_clk_branch rk3568_clk_branches[] __initdata = {
RK3568_CLKSEL_CON(9), 6, 2, MFLAGS, 0, 5, DFLAGS,
RK3568_CLKGATE_CON(4), 0, GFLAGS),
MUXGRF(CLK_DDR1X, "clk_ddr1x", clk_ddr1x_p, CLK_SET_RATE_PARENT,
- RK3568_CLKSEL_CON(9), 15, 1, MFLAGS),
+ RK3568_CLKSEL_CON(9), 15, 1, MFLAGS, grf_type_sys),
COMPOSITE_NOMUX(CLK_MSCH, "clk_msch", "clk_ddr1x", CLK_IGNORE_UNUSED,
RK3568_CLKSEL_CON(10), 0, 2, DFLAGS,
diff --git a/drivers/clk/rockchip/clk-rk3576.c b/drivers/clk/rockchip/clk-rk3576.c
index be703f250197..9bc0ef51ef68 100644
--- a/drivers/clk/rockchip/clk-rk3576.c
+++ b/drivers/clk/rockchip/clk-rk3576.c
@@ -10,11 +10,13 @@
#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
#include <linux/mfd/syscon.h>
+#include <linux/slab.h>
#include <dt-bindings/clock/rockchip,rk3576-cru.h>
#include "clk.h"
#define RK3576_GRF_SOC_STATUS0 0x600
#define RK3576_PMU0_GRF_OSC_CON6 0x18
+#define RK3576_VCCIO_IOC_MISC_CON0 0x6400
enum rk3576_plls {
bpll, lpll, vpll, aupll, cpll, gpll, ppll,
@@ -1481,6 +1483,14 @@ static struct rockchip_clk_branch rk3576_clk_branches[] __initdata = {
RK3576_CLKGATE_CON(10), 0, GFLAGS),
GATE(CLK_SAI0_MCLKOUT, "clk_sai0_mclkout", "mclk_sai0_8ch", 0,
RK3576_CLKGATE_CON(10), 1, GFLAGS),
+ GATE_GRF(CLK_SAI0_MCLKOUT_TO_IO, "mclk_sai0_to_io", "clk_sai0_mclkout",
+ 0, RK3576_VCCIO_IOC_MISC_CON0, 0, GFLAGS, grf_type_ioc),
+ GATE_GRF(CLK_SAI1_MCLKOUT_TO_IO, "mclk_sai1_to_io", "clk_sai1_mclkout",
+ 0, RK3576_VCCIO_IOC_MISC_CON0, 1, GFLAGS, grf_type_ioc),
+ GATE_GRF(CLK_SAI2_MCLKOUT_TO_IO, "mclk_sai2_to_io", "clk_sai2_mclkout",
+ 0, RK3576_VCCIO_IOC_MISC_CON0, 2, GFLAGS, grf_type_ioc),
+ GATE_GRF(CLK_SAI3_MCLKOUT_TO_IO, "mclk_sai3_to_io", "clk_sai3_mclkout",
+ 0, RK3576_VCCIO_IOC_MISC_CON0, 3, GFLAGS, grf_type_ioc),
/* sdgmac */
COMPOSITE_NODIV(HCLK_SDGMAC_ROOT, "hclk_sdgmac_root", mux_200m_100m_50m_24m_p, 0,
@@ -1678,13 +1688,13 @@ static struct rockchip_clk_branch rk3576_clk_branches[] __initdata = {
/* phy ref */
MUXGRF(CLK_PHY_REF_SRC, "clk_phy_ref_src", clk_phy_ref_src_p, 0,
- RK3576_PMU0_GRF_OSC_CON6, 4, 1, MFLAGS),
+ RK3576_PMU0_GRF_OSC_CON6, 4, 1, MFLAGS, grf_type_pmu0),
MUXGRF(CLK_USBPHY_REF_SRC, "clk_usbphy_ref_src", clk_usbphy_ref_src_p, 0,
- RK3576_PMU0_GRF_OSC_CON6, 2, 1, MFLAGS),
+ RK3576_PMU0_GRF_OSC_CON6, 2, 1, MFLAGS, grf_type_pmu0),
MUXGRF(CLK_CPLL_REF_SRC, "clk_cpll_ref_src", clk_cpll_ref_src_p, 0,
- RK3576_PMU0_GRF_OSC_CON6, 1, 1, MFLAGS),
+ RK3576_PMU0_GRF_OSC_CON6, 1, 1, MFLAGS, grf_type_pmu0),
MUXGRF(CLK_AUPLL_REF_SRC, "clk_aupll_ref_src", clk_aupll_ref_src_p, 0,
- RK3576_PMU0_GRF_OSC_CON6, 0, 1, MFLAGS),
+ RK3576_PMU0_GRF_OSC_CON6, 0, 1, MFLAGS, grf_type_pmu0),
/* secure ns */
COMPOSITE_NODIV(ACLK_SECURE_NS, "aclk_secure_ns", mux_350m_175m_116m_24m_p, CLK_IS_CRITICAL,
@@ -1727,17 +1737,26 @@ static void __init rk3576_clk_init(struct device_node *np)
struct rockchip_clk_provider *ctx;
unsigned long clk_nr_clks;
void __iomem *reg_base;
- struct regmap *grf;
+ struct rockchip_aux_grf *ioc_grf_e;
+ struct rockchip_aux_grf *pmu0_grf_e;
+ struct regmap *ioc_grf;
+ struct regmap *pmu0_grf;
clk_nr_clks = rockchip_clk_find_max_clk_id(rk3576_clk_branches,
ARRAY_SIZE(rk3576_clk_branches)) + 1;
- grf = syscon_regmap_lookup_by_compatible("rockchip,rk3576-pmu0-grf");
- if (IS_ERR(grf)) {
+ pmu0_grf = syscon_regmap_lookup_by_compatible("rockchip,rk3576-pmu0-grf");
+ if (IS_ERR(pmu0_grf)) {
pr_err("%s: could not get PMU0 GRF syscon\n", __func__);
return;
}
+ ioc_grf = syscon_regmap_lookup_by_compatible("rockchip,rk3576-ioc-grf");
+ if (IS_ERR(ioc_grf)) {
+ pr_err("%s: could not get IOC GRF syscon\n", __func__);
+ return;
+ }
+
reg_base = of_iomap(np, 0);
if (!reg_base) {
pr_err("%s: could not map cru region\n", __func__);
@@ -1747,11 +1766,24 @@ static void __init rk3576_clk_init(struct device_node *np)
ctx = rockchip_clk_init(np, reg_base, clk_nr_clks);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip clk init failed\n", __func__);
- iounmap(reg_base);
- return;
+ goto err_unmap;
}
- ctx->grf = grf;
+ pmu0_grf_e = kzalloc(sizeof(*pmu0_grf_e), GFP_KERNEL);
+ if (!pmu0_grf_e)
+ goto err_unmap;
+
+ pmu0_grf_e->grf = pmu0_grf;
+ pmu0_grf_e->type = grf_type_pmu0;
+ hash_add(ctx->aux_grf_table, &pmu0_grf_e->node, grf_type_pmu0);
+
+ ioc_grf_e = kzalloc(sizeof(*ioc_grf_e), GFP_KERNEL);
+ if (!ioc_grf_e)
+ goto err_free_pmu0;
+
+ ioc_grf_e->grf = ioc_grf;
+ ioc_grf_e->type = grf_type_ioc;
+ hash_add(ctx->aux_grf_table, &ioc_grf_e->node, grf_type_ioc);
rockchip_clk_register_plls(ctx, rk3576_pll_clks,
ARRAY_SIZE(rk3576_pll_clks),
@@ -1774,6 +1806,14 @@ static void __init rk3576_clk_init(struct device_node *np)
rockchip_register_restart_notifier(ctx, RK3576_GLB_SRST_FST, NULL);
rockchip_clk_of_add_provider(np, ctx);
+
+ return;
+
+err_free_pmu0:
+ kfree(pmu0_grf_e);
+err_unmap:
+ iounmap(reg_base);
+ return;
}
CLK_OF_DECLARE(rk3576_cru, "rockchip,rk3576-cru", rk3576_clk_init);
diff --git a/drivers/clk/rockchip/clk-rk3588.c b/drivers/clk/rockchip/clk-rk3588.c
index 4031733def4e..1694223f4f84 100644
--- a/drivers/clk/rockchip/clk-rk3588.c
+++ b/drivers/clk/rockchip/clk-rk3588.c
@@ -64,6 +64,7 @@ static struct rockchip_pll_rate_table rk3588_pll_rates[] = {
RK3588_PLL_RATE(1560000000, 2, 260, 1, 0),
RK3588_PLL_RATE(1536000000, 2, 256, 1, 0),
RK3588_PLL_RATE(1512000000, 2, 252, 1, 0),
+ RK3588_PLL_RATE(1500000000, 2, 250, 1, 0),
RK3588_PLL_RATE(1488000000, 2, 248, 1, 0),
RK3588_PLL_RATE(1464000000, 2, 244, 1, 0),
RK3588_PLL_RATE(1440000000, 2, 240, 1, 0),
diff --git a/drivers/clk/rockchip/clk-rv1126.c b/drivers/clk/rockchip/clk-rv1126.c
index fc19c5522490..15e7bfe84506 100644
--- a/drivers/clk/rockchip/clk-rv1126.c
+++ b/drivers/clk/rockchip/clk-rv1126.c
@@ -857,7 +857,7 @@ static struct rockchip_clk_branch rv1126_clk_branches[] __initdata = {
RV1126_GMAC_CON, 5, 1, MFLAGS),
MUXGRF(CLK_GMAC_SRC, "clk_gmac_src", mux_clk_gmac_src_p, CLK_SET_RATE_PARENT |
CLK_SET_RATE_NO_REPARENT,
- RV1126_GRF_IOFUNC_CON1, 12, 1, MFLAGS),
+ RV1126_GRF_IOFUNC_CON1, 12, 1, MFLAGS, grf_type_sys),
GATE(CLK_GMAC_REF, "clk_gmac_ref", "clk_gmac_src", 0,
RV1126_CLKGATE_CON(20), 7, GFLAGS),
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index cbf93ea119a9..19caf26c991b 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -382,6 +382,8 @@ static struct rockchip_clk_provider *rockchip_clk_init_base(
ctx->cru_node = np;
spin_lock_init(&ctx->lock);
+ hash_init(ctx->aux_grf_table);
+
ctx->grf = syscon_regmap_lookup_by_phandle(ctx->cru_node,
"rockchip,grf");
@@ -496,6 +498,8 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
struct rockchip_clk_branch *list,
unsigned int nr_clk)
{
+ struct regmap *grf = ctx->grf;
+ struct rockchip_aux_grf *agrf;
struct clk *clk;
unsigned int idx;
unsigned long flags;
@@ -504,6 +508,19 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
flags = list->flags;
clk = NULL;
+ /* for GRF-dependent branches, choose the right grf first */
+ if ((list->branch_type == branch_grf_mux ||
+ list->branch_type == branch_grf_gate ||
+ list->branch_type == branch_grf_mmc) &&
+ list->grf_type != grf_type_sys) {
+ hash_for_each_possible(ctx->aux_grf_table, agrf, node, list->grf_type) {
+ if (agrf->type == list->grf_type) {
+ grf = agrf->grf;
+ break;
+ }
+ }
+ }
+
/* catch simple muxes */
switch (list->branch_type) {
case branch_mux:
@@ -523,10 +540,10 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
list->mux_shift, list->mux_width,
list->mux_flags, &ctx->lock);
break;
- case branch_muxgrf:
+ case branch_grf_mux:
clk = rockchip_clk_register_muxgrf(list->name,
list->parent_names, list->num_parents,
- flags, ctx->grf, list->muxdiv_offset,
+ flags, grf, list->muxdiv_offset,
list->mux_shift, list->mux_width,
list->mux_flags);
break;
@@ -573,6 +590,13 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
ctx->reg_base + list->gate_offset,
list->gate_shift, list->gate_flags, &ctx->lock);
break;
+ case branch_grf_gate:
+ flags |= CLK_SET_RATE_PARENT;
+ clk = rockchip_clk_register_gate_grf(list->name,
+ list->parent_names[0], flags, grf,
+ list->gate_offset, list->gate_shift,
+ list->gate_flags);
+ break;
case branch_composite:
clk = rockchip_clk_register_branch(list->name,
list->parent_names, list->num_parents,
@@ -590,6 +614,16 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
list->name,
list->parent_names, list->num_parents,
ctx->reg_base + list->muxdiv_offset,
+ NULL, 0,
+ list->div_shift
+ );
+ break;
+ case branch_grf_mmc:
+ clk = rockchip_clk_register_mmc(
+ list->name,
+ list->parent_names, list->num_parents,
+ NULL,
+ grf, list->muxdiv_offset,
list->div_shift
);
break;
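The selection logic added above reduces to: branches carrying a non-sys grf_type look up a matching regmap in the auxiliary table and otherwise fall back to the main GRF. A simplified userspace sketch of that choice, with a plain array standing in for the kernel hashtable:

#include <stdio.h>

enum rockchip_grf_type { grf_type_sys, grf_type_pmu0, grf_type_ioc, grf_type_vo, grf_type_vpu };

struct aux_grf {
	enum rockchip_grf_type type;
	const char *regmap;	/* stand-in for struct regmap * */
};

static const char *pick_grf(const struct aux_grf *tbl, unsigned int n,
			    enum rockchip_grf_type want, const char *sys_grf)
{
	if (want == grf_type_sys)
		return sys_grf;
	for (unsigned int i = 0; i < n; i++)
		if (tbl[i].type == want)
			return tbl[i].regmap;
	return sys_grf;		/* no auxiliary entry registered */
}

int main(void)
{
	const struct aux_grf aux[] = {
		{ grf_type_pmu0, "pmu0-grf" },
		{ grf_type_ioc,  "ioc-grf"  },
	};

	printf("%s\n", pick_grf(aux, 2, grf_type_ioc, "sys-grf"));	/* ioc-grf */
	printf("%s\n", pick_grf(aux, 2, grf_type_vo,  "sys-grf"));	/* sys-grf */
	return 0;
}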
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
index df2b2d706450..1e9c3c0d31e3 100644
--- a/drivers/clk/rockchip/clk.h
+++ b/drivers/clk/rockchip/clk.h
@@ -19,6 +19,7 @@
#include <linux/io.h>
#include <linux/clk-provider.h>
+#include <linux/hashtable.h>
struct clk;
@@ -217,6 +218,9 @@ struct clk;
#define RK3528_CLKSEL_CON(x) ((x) * 0x4 + 0x300)
#define RK3528_CLKGATE_CON(x) ((x) * 0x4 + 0x800)
#define RK3528_SOFTRST_CON(x) ((x) * 0x4 + 0xa00)
+#define RK3528_SDMMC_CON(x) ((x) * 0x4 + 0x24)
+#define RK3528_SDIO0_CON(x) ((x) * 0x4 + 0x4)
+#define RK3528_SDIO1_CON(x) ((x) * 0x4 + 0xc)
#define RK3528_PMU_CLKSEL_CON(x) ((x) * 0x4 + 0x300 + RK3528_PMU_CRU_BASE)
#define RK3528_PMU_CLKGATE_CON(x) ((x) * 0x4 + 0x800 + RK3528_PMU_CRU_BASE)
#define RK3528_PCIE_CLKSEL_CON(x) ((x) * 0x4 + 0x300 + RK3528_PCIE_CRU_BASE)
@@ -440,12 +444,37 @@ enum rockchip_pll_type {
.k = _k, \
}
+enum rockchip_grf_type {
+ grf_type_sys = 0,
+ grf_type_pmu0,
+ grf_type_pmu1,
+ grf_type_ioc,
+ grf_type_vo,
+ grf_type_vpu,
+};
+
+/* ceil(sqrt(enums in rockchip_grf_type - 1)) */
+#define GRF_HASH_ORDER 2
+
+/**
+ * struct rockchip_aux_grf - entry for the aux_grf_table hashtable
+ * @grf: pointer to the grf this entry references
+ * @type: what type of GRF this is
+ * @node: hlist node
+ */
+struct rockchip_aux_grf {
+ struct regmap *grf;
+ enum rockchip_grf_type type;
+ struct hlist_node node;
+};
+
/**
* struct rockchip_clk_provider - information about clock provider
* @reg_base: virtual address for the register base.
* @clk_data: holds clock related data like clk* and number of clocks.
* @cru_node: device-node of the clock-provider
* @grf: regmap of the general-register-files syscon
+ * @aux_grf_table: hashtable of auxiliary GRF regmaps, indexed by grf_type
* @lock: maintains exclusion between callbacks for a given clock-provider.
*/
struct rockchip_clk_provider {
@@ -453,6 +482,7 @@ struct rockchip_clk_provider {
struct clk_onecell_data clk_data;
struct device_node *cru_node;
struct regmap *grf;
+ DECLARE_HASHTABLE(aux_grf_table, GRF_HASH_ORDER);
spinlock_t lock;
};
@@ -594,7 +624,9 @@ struct clk *rockchip_clk_register_cpuclk(const char *name,
struct clk *rockchip_clk_register_mmc(const char *name,
const char *const *parent_names, u8 num_parents,
- void __iomem *reg, int shift);
+ void __iomem *reg,
+ struct regmap *grf, int grf_reg,
+ int shift);
/*
* DDRCLK flags, including method of setting the rate
@@ -622,17 +654,24 @@ struct clk *rockchip_clk_register_muxgrf(const char *name,
int flags, struct regmap *grf, int reg,
int shift, int width, int mux_flags);
+struct clk *rockchip_clk_register_gate_grf(const char *name,
+ const char *parent_name, unsigned long flags,
+ struct regmap *regmap, unsigned int reg,
+ unsigned int shift, u8 gate_flags);
+
#define PNAME(x) static const char *const x[] __initconst
enum rockchip_clk_branch_type {
branch_composite,
branch_mux,
- branch_muxgrf,
+ branch_grf_mux,
branch_divider,
branch_fraction_divider,
branch_gate,
+ branch_grf_gate,
branch_linked_gate,
branch_mmc,
+ branch_grf_mmc,
branch_inverter,
branch_factor,
branch_ddrclk,
@@ -660,6 +699,7 @@ struct rockchip_clk_branch {
u8 gate_shift;
u8 gate_flags;
unsigned int linked_clk_id;
+ enum rockchip_grf_type grf_type;
struct rockchip_clk_branch *child;
};
@@ -900,10 +940,10 @@ struct rockchip_clk_branch {
.mux_table = mt, \
}
-#define MUXGRF(_id, cname, pnames, f, o, s, w, mf) \
+#define MUXGRF(_id, cname, pnames, f, o, s, w, mf, gt) \
{ \
.id = _id, \
- .branch_type = branch_muxgrf, \
+ .branch_type = branch_grf_mux, \
.name = cname, \
.parent_names = pnames, \
.num_parents = ARRAY_SIZE(pnames), \
@@ -913,6 +953,7 @@ struct rockchip_clk_branch {
.mux_width = w, \
.mux_flags = mf, \
.gate_offset = -1, \
+ .grf_type = gt, \
}
#define DIV(_id, cname, pname, f, o, s, w, df) \
@@ -958,6 +999,20 @@ struct rockchip_clk_branch {
.gate_flags = gf, \
}
+#define GATE_GRF(_id, cname, pname, f, o, b, gf, gt) \
+ { \
+ .id = _id, \
+ .branch_type = branch_grf_gate, \
+ .name = cname, \
+ .parent_names = (const char *[]){ pname }, \
+ .num_parents = 1, \
+ .flags = f, \
+ .gate_offset = o, \
+ .gate_shift = b, \
+ .gate_flags = gf, \
+ .grf_type = gt, \
+ }
+
#define GATE_LINK(_id, cname, pname, linkedclk, f, o, b, gf) \
{ \
.id = _id, \
@@ -983,6 +1038,18 @@ struct rockchip_clk_branch {
.div_shift = shift, \
}
+#define MMC_GRF(_id, cname, pname, offset, shift, grftype) \
+ { \
+ .id = _id, \
+ .branch_type = branch_grf_mmc, \
+ .name = cname, \
+ .parent_names = (const char *[]){ pname }, \
+ .num_parents = 1, \
+ .muxdiv_offset = offset, \
+ .div_shift = shift, \
+ .grf_type = grftype, \
+ }
+
#define INVERTER(_id, cname, pname, io, is, if) \
{ \
.id = _id, \
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 374c26e5d9fd..cc5c1644c41c 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -1269,6 +1269,45 @@ static const struct samsung_cpu_clock exynos4412_cpu_clks[] __initconst = {
CPUCLK_LAYOUT_E4210, e4412_armclk_d),
};
+static const struct samsung_cmu_info cmu_info_exynos4 __initconst = {
+ .mux_clks = exynos4_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(exynos4_mux_clks),
+ .div_clks = exynos4_div_clks,
+ .nr_div_clks = ARRAY_SIZE(exynos4_div_clks),
+ .gate_clks = exynos4_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(exynos4_gate_clks),
+ .fixed_factor_clks = exynos4_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(exynos4_fixed_factor_clks),
+ .fixed_clks = exynos4_fixed_rate_clks,
+ .nr_fixed_clks = ARRAY_SIZE(exynos4_fixed_rate_clks),
+};
+
+static const struct samsung_cmu_info cmu_info_exynos4210 __initconst = {
+ .mux_clks = exynos4210_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(exynos4210_mux_clks),
+ .div_clks = exynos4210_div_clks,
+ .nr_div_clks = ARRAY_SIZE(exynos4210_div_clks),
+ .gate_clks = exynos4210_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(exynos4210_gate_clks),
+ .fixed_factor_clks = exynos4210_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(exynos4210_fixed_factor_clks),
+ .fixed_clks = exynos4210_fixed_rate_clks,
+ .nr_fixed_clks = ARRAY_SIZE(exynos4210_fixed_rate_clks),
+ .cpu_clks = exynos4210_cpu_clks,
+ .nr_cpu_clks = ARRAY_SIZE(exynos4210_cpu_clks),
+};
+
+static const struct samsung_cmu_info cmu_info_exynos4x12 __initconst = {
+ .mux_clks = exynos4x12_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(exynos4x12_mux_clks),
+ .div_clks = exynos4x12_div_clks,
+ .nr_div_clks = ARRAY_SIZE(exynos4x12_div_clks),
+ .gate_clks = exynos4x12_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(exynos4x12_gate_clks),
+ .fixed_factor_clks = exynos4x12_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(exynos4x12_fixed_factor_clks),
+};
+
/* register exynos4 clocks */
static void __init exynos4_clk_init(struct device_node *np,
enum exynos4_soc soc)
@@ -1322,41 +1361,12 @@ static void __init exynos4_clk_init(struct device_node *np,
ARRAY_SIZE(exynos4x12_plls));
}
- samsung_clk_register_fixed_rate(ctx, exynos4_fixed_rate_clks,
- ARRAY_SIZE(exynos4_fixed_rate_clks));
- samsung_clk_register_mux(ctx, exynos4_mux_clks,
- ARRAY_SIZE(exynos4_mux_clks));
- samsung_clk_register_div(ctx, exynos4_div_clks,
- ARRAY_SIZE(exynos4_div_clks));
- samsung_clk_register_gate(ctx, exynos4_gate_clks,
- ARRAY_SIZE(exynos4_gate_clks));
- samsung_clk_register_fixed_factor(ctx, exynos4_fixed_factor_clks,
- ARRAY_SIZE(exynos4_fixed_factor_clks));
+ samsung_cmu_register_clocks(ctx, &cmu_info_exynos4);
if (exynos4_soc == EXYNOS4210) {
- samsung_clk_register_fixed_rate(ctx, exynos4210_fixed_rate_clks,
- ARRAY_SIZE(exynos4210_fixed_rate_clks));
- samsung_clk_register_mux(ctx, exynos4210_mux_clks,
- ARRAY_SIZE(exynos4210_mux_clks));
- samsung_clk_register_div(ctx, exynos4210_div_clks,
- ARRAY_SIZE(exynos4210_div_clks));
- samsung_clk_register_gate(ctx, exynos4210_gate_clks,
- ARRAY_SIZE(exynos4210_gate_clks));
- samsung_clk_register_fixed_factor(ctx,
- exynos4210_fixed_factor_clks,
- ARRAY_SIZE(exynos4210_fixed_factor_clks));
- samsung_clk_register_cpu(ctx, exynos4210_cpu_clks,
- ARRAY_SIZE(exynos4210_cpu_clks));
+ samsung_cmu_register_clocks(ctx, &cmu_info_exynos4210);
} else {
- samsung_clk_register_mux(ctx, exynos4x12_mux_clks,
- ARRAY_SIZE(exynos4x12_mux_clks));
- samsung_clk_register_div(ctx, exynos4x12_div_clks,
- ARRAY_SIZE(exynos4x12_div_clks));
- samsung_clk_register_gate(ctx, exynos4x12_gate_clks,
- ARRAY_SIZE(exynos4x12_gate_clks));
- samsung_clk_register_fixed_factor(ctx,
- exynos4x12_fixed_factor_clks,
- ARRAY_SIZE(exynos4x12_fixed_factor_clks));
+ samsung_cmu_register_clocks(ctx, &cmu_info_exynos4x12);
if (soc == EXYNOS4412)
samsung_clk_register_cpu(ctx, exynos4412_cpu_clks,
ARRAY_SIZE(exynos4412_cpu_clks));
diff --git a/drivers/clk/samsung/clk-exynosautov920.c b/drivers/clk/samsung/clk-exynosautov920.c
index dc8d4240f6de..da4afe8ac2ab 100644
--- a/drivers/clk/samsung/clk-exynosautov920.c
+++ b/drivers/clk/samsung/clk-exynosautov920.c
@@ -18,6 +18,9 @@
/* NOTE: Must be equal to the last clock ID increased by one */
#define CLKS_NR_TOP (DOUT_CLKCMU_TAA_NOC + 1)
+#define CLKS_NR_CPUCL0 (CLK_DOUT_CPUCL0_NOCP + 1)
+#define CLKS_NR_CPUCL1 (CLK_DOUT_CPUCL1_NOCP + 1)
+#define CLKS_NR_CPUCL2 (CLK_DOUT_CPUCL2_NOCP + 1)
#define CLKS_NR_PERIC0 (CLK_DOUT_PERIC0_I3C + 1)
#define CLKS_NR_PERIC1 (CLK_DOUT_PERIC1_I3C + 1)
#define CLKS_NR_MISC (CLK_DOUT_MISC_OSC_DIV2 + 1)
@@ -1005,6 +1008,339 @@ static void __init exynosautov920_cmu_top_init(struct device_node *np)
CLK_OF_DECLARE(exynosautov920_cmu_top, "samsung,exynosautov920-cmu-top",
exynosautov920_cmu_top_init);
+/* ---- CMU_CPUCL0 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_CPUCL0 (0x1EC00000) */
+#define PLL_LOCKTIME_PLL_CPUCL0 0x0000
+#define PLL_CON0_PLL_CPUCL0 0x0100
+#define PLL_CON1_PLL_CPUCL0 0x0104
+#define PLL_CON3_PLL_CPUCL0 0x010c
+#define PLL_CON0_MUX_CLKCMU_CPUCL0_CLUSTER_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_CPUCL0_DBG_USER 0x0610
+#define PLL_CON0_MUX_CLKCMU_CPUCL0_SWITCH_USER 0x0620
+
+#define CLK_CON_MUX_MUX_CLK_CPUCL0_CLUSTER 0x1000
+#define CLK_CON_MUX_MUX_CLK_CPUCL0_CORE 0x1004
+
+#define CLK_CON_DIV_DIV_CLK_CLUSTER0_ACLK 0x1800
+#define CLK_CON_DIV_DIV_CLK_CLUSTER0_ATCLK 0x1804
+#define CLK_CON_DIV_DIV_CLK_CLUSTER0_MPCLK 0x1808
+#define CLK_CON_DIV_DIV_CLK_CLUSTER0_PCLK 0x180c
+#define CLK_CON_DIV_DIV_CLK_CLUSTER0_PERIPHCLK 0x1810
+#define CLK_CON_DIV_DIV_CLK_CPUCL0_DBG_NOC 0x181c
+#define CLK_CON_DIV_DIV_CLK_CPUCL0_DBG_PCLKDBG 0x1820
+#define CLK_CON_DIV_DIV_CLK_CPUCL0_NOCP 0x1824
+
+static const unsigned long cpucl0_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_CPUCL0,
+ PLL_CON0_PLL_CPUCL0,
+ PLL_CON1_PLL_CPUCL0,
+ PLL_CON3_PLL_CPUCL0,
+ PLL_CON0_MUX_CLKCMU_CPUCL0_CLUSTER_USER,
+ PLL_CON0_MUX_CLKCMU_CPUCL0_DBG_USER,
+ PLL_CON0_MUX_CLKCMU_CPUCL0_SWITCH_USER,
+ CLK_CON_MUX_MUX_CLK_CPUCL0_CLUSTER,
+ CLK_CON_MUX_MUX_CLK_CPUCL0_CORE,
+ CLK_CON_DIV_DIV_CLK_CLUSTER0_ACLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER0_ATCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER0_MPCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER0_PCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER0_PERIPHCLK,
+ CLK_CON_DIV_DIV_CLK_CPUCL0_DBG_NOC,
+ CLK_CON_DIV_DIV_CLK_CPUCL0_DBG_PCLKDBG,
+ CLK_CON_DIV_DIV_CLK_CPUCL0_NOCP,
+};
+
+/* List of parent clocks for Muxes in CMU_CPUCL0 */
+PNAME(mout_pll_cpucl0_p) = { "oscclk", "fout_cpucl0_pll" };
+PNAME(mout_cpucl0_cluster_user_p) = { "oscclk", "dout_clkcmu_cpucl0_cluster" };
+PNAME(mout_cpucl0_dbg_user_p) = { "oscclk", "dout_clkcmu_cpucl0_dbg" };
+PNAME(mout_cpucl0_switch_user_p) = { "oscclk", "dout_clkcmu_cpucl0_switch" };
+PNAME(mout_cpucl0_cluster_p) = { "oscclk", "mout_cpucl0_cluster_user",
+ "mout_cpucl0_switch_user"};
+PNAME(mout_cpucl0_core_p) = { "oscclk", "mout_pll_cpucl0",
+ "mout_cpucl0_switch_user"};
+
+static const struct samsung_pll_rate_table cpu_pll_rates[] __initconst = {
+ PLL_35XX_RATE(38400000U, 2400000000U, 250, 4, 0),
+ PLL_35XX_RATE(38400000U, 2304000000U, 240, 4, 0),
+ PLL_35XX_RATE(38400000U, 2208000000U, 230, 4, 0),
+ PLL_35XX_RATE(38400000U, 2112000000U, 220, 4, 0),
+ PLL_35XX_RATE(38400000U, 2016000000U, 210, 4, 0),
+ PLL_35XX_RATE(38400000U, 1824000000U, 190, 4, 0),
+ PLL_35XX_RATE(38400000U, 1680000000U, 175, 4, 0),
+ PLL_35XX_RATE(38400000U, 1344000000U, 140, 4, 0),
+ PLL_35XX_RATE(38400000U, 1152000000U, 120, 4, 0),
+ PLL_35XX_RATE(38400000U, 576000000U, 120, 4, 1),
+ PLL_35XX_RATE(38400000U, 288000000U, 120, 4, 2),
+};
+
+static const struct samsung_pll_clock cpucl0_pll_clks[] __initconst = {
+ /* CMU_CPUCL0_PURECLKCOMP */
+ PLL(pll_531x, CLK_FOUT_CPUCL0_PLL, "fout_cpucl0_pll", "oscclk",
+ PLL_LOCKTIME_PLL_CPUCL0, PLL_CON3_PLL_CPUCL0, cpu_pll_rates),
+};
+
+static const struct samsung_mux_clock cpucl0_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PLL_CPUCL0, "mout_pll_cpucl0", mout_pll_cpucl0_p,
+ PLL_CON0_PLL_CPUCL0, 4, 1),
+ MUX(CLK_MOUT_CPUCL0_CLUSTER_USER, "mout_cpucl0_cluster_user", mout_cpucl0_cluster_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL0_CLUSTER_USER, 4, 1),
+ MUX(CLK_MOUT_CPUCL0_DBG_USER, "mout_cpucl0_dbg_user", mout_cpucl0_dbg_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL0_DBG_USER, 4, 1),
+ MUX(CLK_MOUT_CPUCL0_SWITCH_USER, "mout_cpucl0_switch_user", mout_cpucl0_switch_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL0_SWITCH_USER, 4, 1),
+ MUX(CLK_MOUT_CPUCL0_CLUSTER, "mout_cpucl0_cluster", mout_cpucl0_cluster_p,
+ CLK_CON_MUX_MUX_CLK_CPUCL0_CLUSTER, 0, 2),
+ MUX(CLK_MOUT_CPUCL0_CORE, "mout_cpucl0_core", mout_cpucl0_core_p,
+ CLK_CON_MUX_MUX_CLK_CPUCL0_CORE, 0, 2),
+};
+
+static const struct samsung_div_clock cpucl0_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CLUSTER0_ACLK, "dout_cluster0_aclk",
+ "mout_cpucl0_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER0_ACLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER0_ATCLK, "dout_cluster0_atclk",
+ "mout_cpucl0_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER0_ATCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER0_MPCLK, "dout_cluster0_mpclk",
+ "mout_cpucl0_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER0_MPCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER0_PCLK, "dout_cluster0_pclk",
+ "mout_cpucl0_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER0_PCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER0_PERIPHCLK, "dout_cluster0_periphclk",
+ "mout_cpucl0_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER0_PERIPHCLK, 0, 4),
+ DIV(CLK_DOUT_CPUCL0_DBG_NOC, "dout_cpucl0_dbg_noc",
+ "mout_cpucl0_dbg_user", CLK_CON_DIV_DIV_CLK_CPUCL0_DBG_NOC, 0, 3),
+ DIV(CLK_DOUT_CPUCL0_DBG_PCLKDBG, "dout_cpucl0_dbg_pclkdbg",
+ "mout_cpucl0_dbg_user", CLK_CON_DIV_DIV_CLK_CPUCL0_DBG_PCLKDBG, 0, 3),
+ DIV(CLK_DOUT_CPUCL0_NOCP, "dout_cpucl0_nocp",
+ "mout_cpucl0_cluster", CLK_CON_DIV_DIV_CLK_CPUCL0_NOCP, 0, 4),
+};
+
+static const struct samsung_cmu_info cpucl0_cmu_info __initconst = {
+ .pll_clks = cpucl0_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(cpucl0_pll_clks),
+ .mux_clks = cpucl0_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cpucl0_mux_clks),
+ .div_clks = cpucl0_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cpucl0_div_clks),
+ .nr_clk_ids = CLKS_NR_CPUCL0,
+ .clk_regs = cpucl0_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cpucl0_clk_regs),
+ .clk_name = "cpucl0",
+};
+
+static void __init exynosautov920_cmu_cpucl0_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &cpucl0_cmu_info);
+}
+
+/* Register CMU_CPUCL0 early, as CPU clocks should be available ASAP */
+CLK_OF_DECLARE(exynosautov920_cmu_cpucl0, "samsung,exynosautov920-cmu-cpucl0",
+ exynosautov920_cmu_cpucl0_init);
+
+/* ---- CMU_CPUCL1 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_CPUCL1 (0x1ED00000) */
+#define PLL_LOCKTIME_PLL_CPUCL1 0x0000
+#define PLL_CON0_PLL_CPUCL1 0x0100
+#define PLL_CON1_PLL_CPUCL1 0x0104
+#define PLL_CON3_PLL_CPUCL1 0x010c
+#define PLL_CON0_MUX_CLKCMU_CPUCL1_CLUSTER_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_CPUCL1_SWITCH_USER 0x0610
+
+#define CLK_CON_MUX_MUX_CLK_CPUCL1_CLUSTER 0x1000
+#define CLK_CON_MUX_MUX_CLK_CPUCL1_CORE 0x1004
+
+#define CLK_CON_DIV_DIV_CLK_CLUSTER1_ACLK 0x1800
+#define CLK_CON_DIV_DIV_CLK_CLUSTER1_ATCLK 0x1804
+#define CLK_CON_DIV_DIV_CLK_CLUSTER1_MPCLK 0x1808
+#define CLK_CON_DIV_DIV_CLK_CLUSTER1_PCLK 0x180c
+#define CLK_CON_DIV_DIV_CLK_CLUSTER1_PERIPHCLK 0x1810
+#define CLK_CON_DIV_DIV_CLK_CPUCL1_NOCP 0x181c
+
+static const unsigned long cpucl1_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_CPUCL1,
+ PLL_CON0_PLL_CPUCL1,
+ PLL_CON1_PLL_CPUCL1,
+ PLL_CON3_PLL_CPUCL1,
+ PLL_CON0_MUX_CLKCMU_CPUCL1_CLUSTER_USER,
+ PLL_CON0_MUX_CLKCMU_CPUCL1_SWITCH_USER,
+ CLK_CON_MUX_MUX_CLK_CPUCL1_CLUSTER,
+ CLK_CON_MUX_MUX_CLK_CPUCL1_CORE,
+ CLK_CON_DIV_DIV_CLK_CLUSTER1_ACLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER1_ATCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER1_MPCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER1_PCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER1_PERIPHCLK,
+ CLK_CON_DIV_DIV_CLK_CPUCL1_NOCP,
+};
+
+/* List of parent clocks for Muxes in CMU_CPUCL1 */
+PNAME(mout_pll_cpucl1_p) = { "oscclk", "fout_cpucl1_pll" };
+PNAME(mout_cpucl1_cluster_user_p) = { "oscclk", "dout_clkcmu_cpucl1_cluster" };
+PNAME(mout_cpucl1_switch_user_p) = { "oscclk", "dout_clkcmu_cpucl1_switch" };
+PNAME(mout_cpucl1_cluster_p) = { "oscclk", "mout_cpucl1_cluster_user",
+ "mout_cpucl1_switch_user"};
+PNAME(mout_cpucl1_core_p) = { "oscclk", "mout_pll_cpucl1",
+ "mout_cpucl1_switch_user"};
+
+static const struct samsung_pll_clock cpucl1_pll_clks[] __initconst = {
+ /* CMU_CPUCL1_PURECLKCOMP */
+ PLL(pll_531x, CLK_FOUT_CPUCL1_PLL, "fout_cpucl1_pll", "oscclk",
+ PLL_LOCKTIME_PLL_CPUCL1, PLL_CON3_PLL_CPUCL1, cpu_pll_rates),
+};
+
+static const struct samsung_mux_clock cpucl1_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PLL_CPUCL1, "mout_pll_cpucl1", mout_pll_cpucl1_p,
+ PLL_CON0_PLL_CPUCL1, 4, 1),
+ MUX(CLK_MOUT_CPUCL1_CLUSTER_USER, "mout_cpucl1_cluster_user", mout_cpucl1_cluster_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL1_CLUSTER_USER, 4, 1),
+ MUX(CLK_MOUT_CPUCL1_SWITCH_USER, "mout_cpucl1_switch_user", mout_cpucl1_switch_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL1_SWITCH_USER, 4, 1),
+ MUX(CLK_MOUT_CPUCL1_CLUSTER, "mout_cpucl1_cluster", mout_cpucl1_cluster_p,
+ CLK_CON_MUX_MUX_CLK_CPUCL1_CLUSTER, 0, 2),
+ MUX(CLK_MOUT_CPUCL1_CORE, "mout_cpucl1_core", mout_cpucl1_core_p,
+ CLK_CON_MUX_MUX_CLK_CPUCL1_CORE, 0, 2),
+};
+
+static const struct samsung_div_clock cpucl1_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CLUSTER1_ACLK, "dout_cluster1_aclk",
+ "mout_cpucl1_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER1_ACLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER1_ATCLK, "dout_cluster1_atclk",
+ "mout_cpucl1_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER1_ATCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER1_MPCLK, "dout_cluster1_mpclk",
+ "mout_cpucl1_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER1_MPCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER1_PCLK, "dout_cluster1_pclk",
+ "mout_cpucl1_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER1_PCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER1_PERIPHCLK, "dout_cluster1_periphclk",
+ "mout_cpucl1_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER1_PERIPHCLK, 0, 4),
+ DIV(CLK_DOUT_CPUCL1_NOCP, "dout_cpucl1_nocp",
+ "mout_cpucl1_cluster", CLK_CON_DIV_DIV_CLK_CPUCL1_NOCP, 0, 4),
+};
+
+static const struct samsung_cmu_info cpucl1_cmu_info __initconst = {
+ .pll_clks = cpucl1_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(cpucl1_pll_clks),
+ .mux_clks = cpucl1_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cpucl1_mux_clks),
+ .div_clks = cpucl1_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cpucl1_div_clks),
+ .nr_clk_ids = CLKS_NR_CPUCL1,
+ .clk_regs = cpucl1_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cpucl1_clk_regs),
+ .clk_name = "cpucl1",
+};
+
+static void __init exynosautov920_cmu_cpucl1_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &cpucl1_cmu_info);
+}
+
+/* Register CMU_CPUCL1 early, as CPU clocks should be available ASAP */
+CLK_OF_DECLARE(exynosautov920_cmu_cpucl1, "samsung,exynosautov920-cmu-cpucl1",
+ exynosautov920_cmu_cpucl1_init);
+
+/* ---- CMU_CPUCL2 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_CPUCL2 (0x1EE00000) */
+#define PLL_LOCKTIME_PLL_CPUCL2 0x0000
+#define PLL_CON0_PLL_CPUCL2 0x0100
+#define PLL_CON1_PLL_CPUCL2 0x0104
+#define PLL_CON3_PLL_CPUCL2 0x010c
+#define PLL_CON0_MUX_CLKCMU_CPUCL2_CLUSTER_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_CPUCL2_SWITCH_USER 0x0610
+
+#define CLK_CON_MUX_MUX_CLK_CPUCL2_CLUSTER 0x1000
+#define CLK_CON_MUX_MUX_CLK_CPUCL2_CORE 0x1004
+
+#define CLK_CON_DIV_DIV_CLK_CLUSTER2_ACLK 0x1800
+#define CLK_CON_DIV_DIV_CLK_CLUSTER2_ATCLK 0x1804
+#define CLK_CON_DIV_DIV_CLK_CLUSTER2_MPCLK 0x1808
+#define CLK_CON_DIV_DIV_CLK_CLUSTER2_PCLK 0x180c
+#define CLK_CON_DIV_DIV_CLK_CLUSTER2_PERIPHCLK 0x1810
+#define CLK_CON_DIV_DIV_CLK_CPUCL2_NOCP 0x181c
+
+static const unsigned long cpucl2_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_CPUCL2,
+ PLL_CON0_PLL_CPUCL2,
+ PLL_CON1_PLL_CPUCL2,
+ PLL_CON3_PLL_CPUCL2,
+ PLL_CON0_MUX_CLKCMU_CPUCL2_CLUSTER_USER,
+ PLL_CON0_MUX_CLKCMU_CPUCL2_SWITCH_USER,
+ CLK_CON_MUX_MUX_CLK_CPUCL2_CLUSTER,
+ CLK_CON_MUX_MUX_CLK_CPUCL2_CORE,
+ CLK_CON_DIV_DIV_CLK_CLUSTER2_ACLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER2_ATCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER2_MPCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER2_PCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER2_PERIPHCLK,
+ CLK_CON_DIV_DIV_CLK_CPUCL2_NOCP,
+};
+
+/* List of parent clocks for Muxes in CMU_CPUCL2 */
+PNAME(mout_pll_cpucl2_p) = { "oscclk", "fout_cpucl2_pll" };
+PNAME(mout_cpucl2_cluster_user_p) = { "oscclk", "dout_clkcmu_cpucl2_cluster" };
+PNAME(mout_cpucl2_switch_user_p) = { "oscclk", "dout_clkcmu_cpucl2_switch" };
+PNAME(mout_cpucl2_cluster_p) = { "oscclk", "mout_cpucl2_cluster_user",
+ "mout_cpucl2_switch_user"};
+PNAME(mout_cpucl2_core_p) = { "oscclk", "mout_pll_cpucl2",
+ "mout_cpucl2_switch_user"};
+
+static const struct samsung_pll_clock cpucl2_pll_clks[] __initconst = {
+ /* CMU_CPUCL2_PURECLKCOMP */
+ PLL(pll_531x, CLK_FOUT_CPUCL2_PLL, "fout_cpucl2_pll", "oscclk",
+ PLL_LOCKTIME_PLL_CPUCL2, PLL_CON3_PLL_CPUCL2, cpu_pll_rates),
+};
+
+static const struct samsung_mux_clock cpucl2_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PLL_CPUCL2, "mout_pll_cpucl2", mout_pll_cpucl2_p,
+ PLL_CON0_PLL_CPUCL2, 4, 1),
+ MUX(CLK_MOUT_CPUCL2_CLUSTER_USER, "mout_cpucl2_cluster_user", mout_cpucl2_cluster_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL2_CLUSTER_USER, 4, 1),
+ MUX(CLK_MOUT_CPUCL2_SWITCH_USER, "mout_cpucl2_switch_user", mout_cpucl2_switch_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL2_SWITCH_USER, 4, 1),
+ MUX(CLK_MOUT_CPUCL2_CLUSTER, "mout_cpucl2_cluster", mout_cpucl2_cluster_p,
+ CLK_CON_MUX_MUX_CLK_CPUCL2_CLUSTER, 0, 2),
+ MUX(CLK_MOUT_CPUCL2_CORE, "mout_cpucl2_core", mout_cpucl2_core_p,
+ CLK_CON_MUX_MUX_CLK_CPUCL2_CORE, 0, 2),
+};
+
+static const struct samsung_div_clock cpucl2_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CLUSTER2_ACLK, "dout_cluster2_aclk",
+ "mout_cpucl2_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER2_ACLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER2_ATCLK, "dout_cluster2_atclk",
+ "mout_cpucl2_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER2_ATCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER2_MPCLK, "dout_cluster2_mpclk",
+ "mout_cpucl2_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER2_MPCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER2_PCLK, "dout_cluster2_pclk",
+ "mout_cpucl2_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER2_PCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER2_PERIPHCLK, "dout_cluster2_periphclk",
+ "mout_cpucl2_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER2_PERIPHCLK, 0, 4),
+ DIV(CLK_DOUT_CPUCL2_NOCP, "dout_cpucl2_nocp",
+ "mout_cpucl2_cluster", CLK_CON_DIV_DIV_CLK_CPUCL2_NOCP, 0, 4),
+};
+
+static const struct samsung_cmu_info cpucl2_cmu_info __initconst = {
+ .pll_clks = cpucl2_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(cpucl2_pll_clks),
+ .mux_clks = cpucl2_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cpucl2_mux_clks),
+ .div_clks = cpucl2_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cpucl2_div_clks),
+ .nr_clk_ids = CLKS_NR_CPUCL2,
+ .clk_regs = cpucl2_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cpucl2_clk_regs),
+ .clk_name = "cpucl2",
+};
+
+static void __init exynosautov920_cmu_cpucl2_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &cpucl2_cmu_info);
+}
+
+/* Register CMU_CPUCL2 early, as CPU clocks should be available ASAP */
+CLK_OF_DECLARE(exynosautov920_cmu_cpucl2, "samsung,exynosautov920-cmu-cpucl2",
+ exynosautov920_cmu_cpucl2_init);
+
/* ---- CMU_PERIC0 --------------------------------------------------------- */
/* Register Offset definitions for CMU_PERIC0 (0x10800000) */
@@ -1393,7 +1729,7 @@ static const unsigned long hsi1_clk_regs[] __initconst = {
/* List of parent clocks for Muxes in CMU_HSI1 */
PNAME(mout_hsi1_mmc_card_user_p) = {"oscclk", "dout_clkcmu_hsi1_mmc_card"};
PNAME(mout_hsi1_noc_user_p) = { "oscclk", "dout_clkcmu_hsi1_noc" };
-PNAME(mout_hsi1_usbdrd_user_p) = { "oscclk", "mout_clkcmu_hsi1_usbdrd" };
+PNAME(mout_hsi1_usbdrd_user_p) = { "oscclk", "dout_clkcmu_hsi1_usbdrd" };
PNAME(mout_hsi1_usbdrd_p) = { "dout_tcxo_div2", "mout_hsi1_usbdrd_user" };
static const struct samsung_mux_clock hsi1_mux_clks[] __initconst = {
diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c
index 1d82737befd3..a88c212bda12 100644
--- a/drivers/clk/socfpga/clk-pll-s10.c
+++ b/drivers/clk/socfpga/clk-pll-s10.c
@@ -83,9 +83,9 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
unsigned long parent_rate)
{
struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk);
- unsigned long mdiv;
- unsigned long refdiv;
- unsigned long reg;
+ u32 mdiv;
+ u32 refdiv;
+ u32 reg;
unsigned long long vco_freq;
/* read VCO1 reg for numerator and denominator */
diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
index 9dcc1b2d2cc0..03a96139a576 100644
--- a/drivers/clk/socfpga/clk-pll.c
+++ b/drivers/clk/socfpga/clk-pll.c
@@ -39,9 +39,9 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
unsigned long parent_rate)
{
struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk);
- unsigned long divf, divq, reg;
+ u32 divf, divq, reg;
unsigned long long vco_freq;
- unsigned long bypass;
+ u32 bypass;
reg = readl(socfpgaclk->hw.reg);
bypass = readl(clk_mgr_base_addr + CLKMGR_BYPASS);
diff --git a/drivers/clk/sophgo/Kconfig b/drivers/clk/sophgo/Kconfig
index 8b1367e3a95e..e14e802f28bf 100644
--- a/drivers/clk/sophgo/Kconfig
+++ b/drivers/clk/sophgo/Kconfig
@@ -37,3 +37,22 @@ config CLK_SOPHGO_SG2042_RPGATE
This clock IP depends on SG2042 Clock Generator because it uses
clock from Clock Generator IP as input.
This driver provides Gate function for RP.
+
+config CLK_SOPHGO_SG2044
+ tristate "Sophgo SG2044 clock controller support"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ help
+ This driver supports the clock controller on the Sophgo SG2044
+	  SoC. This controller requires multiple PLL clocks as input.
+	  This clock controller provides PLL clocks and common clock
+	  functions for various IPs on the SoC.
+
+config CLK_SOPHGO_SG2044_PLL
+ tristate "Sophgo SG2044 PLL clock controller support"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ select MFD_SYSCON
+ select REGMAP_MMIO
+ help
+ This driver supports the PLL clock controller on the Sophgo
+	  SG2044 SoC. This controller requires a 25 MHz oscillator as
+	  input. It provides the PLL clocks on the SoC.
diff --git a/drivers/clk/sophgo/Makefile b/drivers/clk/sophgo/Makefile
index 53506845a044..26b2fd121582 100644
--- a/drivers/clk/sophgo/Makefile
+++ b/drivers/clk/sophgo/Makefile
@@ -9,3 +9,5 @@ clk-sophgo-cv1800-y += clk-cv18xx-pll.o
obj-$(CONFIG_CLK_SOPHGO_SG2042_CLKGEN) += clk-sg2042-clkgen.o
obj-$(CONFIG_CLK_SOPHGO_SG2042_PLL) += clk-sg2042-pll.o
obj-$(CONFIG_CLK_SOPHGO_SG2042_RPGATE) += clk-sg2042-rpgate.o
+obj-$(CONFIG_CLK_SOPHGO_SG2044) += clk-sg2044.o
+obj-$(CONFIG_CLK_SOPHGO_SG2044_PLL) += clk-sg2044-pll.o
diff --git a/drivers/clk/sophgo/clk-cv1800.c b/drivers/clk/sophgo/clk-cv1800.c
index e0c4dc347579..a4116ac1adcb 100644
--- a/drivers/clk/sophgo/clk-cv1800.c
+++ b/drivers/clk/sophgo/clk-cv1800.c
@@ -1519,7 +1519,9 @@ static int cv1800_clk_probe(struct platform_device *pdev)
static const struct of_device_id cv1800_clk_ids[] = {
{ .compatible = "sophgo,cv1800-clk", .data = &cv1800_desc },
+ { .compatible = "sophgo,cv1800b-clk", .data = &cv1800_desc },
{ .compatible = "sophgo,cv1810-clk", .data = &cv1810_desc },
+ { .compatible = "sophgo,cv1812h-clk", .data = &cv1810_desc },
{ .compatible = "sophgo,sg2000-clk", .data = &sg2000_desc },
{ }
};
diff --git a/drivers/clk/sophgo/clk-sg2044-pll.c b/drivers/clk/sophgo/clk-sg2044-pll.c
new file mode 100644
index 000000000000..94c0f519ba6d
--- /dev/null
+++ b/drivers/clk/sophgo/clk-sg2044-pll.c
@@ -0,0 +1,628 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sophgo SG2044 PLL clock controller driver
+ *
+ * Copyright (C) 2025 Inochi Amaoto <inochiama@gmail.com>
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/math64.h>
+#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+
+#include <dt-bindings/clock/sophgo,sg2044-pll.h>
+
+/* Low Control part */
+#define PLL_VCOSEL_MASK GENMASK(17, 16)
+
+/* High Control part */
+#define PLL_FBDIV_MASK GENMASK(11, 0)
+#define PLL_REFDIV_MASK GENMASK(17, 12)
+#define PLL_POSTDIV1_MASK GENMASK(20, 18)
+#define PLL_POSTDIV2_MASK GENMASK(23, 21)
+
+#define PLL_CALIBRATE_EN BIT(24)
+#define PLL_CALIBRATE_MASK GENMASK(29, 27)
+#define PLL_CALIBRATE_DEFAULT FIELD_PREP(PLL_CALIBRATE_MASK, 2)
+#define PLL_UPDATE_EN BIT(30)
+
+#define PLL_HIGH_CTRL_MASK \
+ (PLL_FBDIV_MASK | PLL_REFDIV_MASK | \
+ PLL_POSTDIV1_MASK | PLL_POSTDIV2_MASK | \
+ PLL_CALIBRATE_EN | PLL_CALIBRATE_MASK | \
+ PLL_UPDATE_EN)
+
+#define PLL_HIGH_CTRL_OFFSET 4
+
+#define PLL_VCOSEL_1G6 0x2
+#define PLL_VCOSEL_2G4 0x3
+
+#define PLL_LIMIT_FOUTVCO 0
+#define PLL_LIMIT_FOUT 1
+#define PLL_LIMIT_REFDIV 2
+#define PLL_LIMIT_FBDIV 3
+#define PLL_LIMIT_POSTDIV1 4
+#define PLL_LIMIT_POSTDIV2 5
+
+#define for_each_pll_limit_range(_var, _limit) \
+ for (_var = (_limit)->min; _var <= (_limit)->max; _var++)
+
+struct sg2044_pll_limit {
+ u64 min;
+ u64 max;
+};
+
+struct sg2044_pll_internal {
+ u32 ctrl_offset;
+ u32 status_offset;
+ u32 enable_offset;
+
+ u8 status_lock_bit;
+ u8 status_updating_bit;
+ u8 enable_bit;
+
+ const struct sg2044_pll_limit *limits;
+};
+
+struct sg2044_clk_common {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ spinlock_t *lock;
+ unsigned int id;
+};
+
+struct sg2044_pll {
+ struct sg2044_clk_common common;
+ struct sg2044_pll_internal pll;
+ unsigned int syscon_offset;
+};
+
+struct sg2044_pll_desc_data {
+ struct sg2044_clk_common * const *pll;
+ u16 num_pll;
+};
+
+#define SG2044_SYSCON_PLL_OFFSET 0x98
+
+struct sg2044_pll_ctrl {
+ spinlock_t lock;
+ struct clk_hw_onecell_data data;
+};
+
+#define hw_to_sg2044_clk_common(_hw) \
+ container_of((_hw), struct sg2044_clk_common, hw)
+
+static inline bool sg2044_clk_fit_limit(u64 value,
+ const struct sg2044_pll_limit *limit)
+{
+ return value >= limit->min && value <= limit->max;
+}
+
+static inline struct sg2044_pll *hw_to_sg2044_pll(struct clk_hw *hw)
+{
+ return container_of(hw_to_sg2044_clk_common(hw),
+ struct sg2044_pll, common);
+}
+
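+/* VCO rate derived from the divider fields: Fvco = Fref * FBDIV / REFDIV */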
+static unsigned long sg2044_pll_calc_vco_rate(unsigned long parent_rate,
+ unsigned long refdiv,
+ unsigned long fbdiv)
+{
+ u64 numerator = parent_rate * fbdiv;
+
+ return div64_ul(numerator, refdiv);
+}
+
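+/*
+ * PLL output rate derived from the divider fields:
+ * Fout = Fref * FBDIV / (REFDIV * (POSTDIV1 + 1) * (POSTDIV2 + 1))
+ */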
+static unsigned long sg2044_pll_calc_rate(unsigned long parent_rate,
+ unsigned long refdiv,
+ unsigned long fbdiv,
+ unsigned long postdiv1,
+ unsigned long postdiv2)
+{
+ u64 numerator, denominator;
+
+ numerator = parent_rate * fbdiv;
+ denominator = refdiv * (postdiv1 + 1) * (postdiv2 + 1);
+
+ return div64_u64(numerator, denominator);
+}
+
+static unsigned long sg2044_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct sg2044_pll *pll = hw_to_sg2044_pll(hw);
+ u32 value;
+ int ret;
+
+ ret = regmap_read(pll->common.regmap,
+ pll->syscon_offset + pll->pll.ctrl_offset + PLL_HIGH_CTRL_OFFSET,
+ &value);
+ if (ret < 0)
+ return 0;
+
+ return sg2044_pll_calc_rate(parent_rate,
+ FIELD_GET(PLL_REFDIV_MASK, value),
+ FIELD_GET(PLL_FBDIV_MASK, value),
+ FIELD_GET(PLL_POSTDIV1_MASK, value),
+ FIELD_GET(PLL_POSTDIV2_MASK, value));
+}
+
+static bool pll_is_better_rate(unsigned long target, unsigned long now,
+ unsigned long best)
+{
+ return abs_diff(target, now) < abs_diff(target, best);
+}
+
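+/*
+ * For a given REFDIV/FBDIV pair, search POSTDIV1/POSTDIV2 for the
+ * combination closest to the target rate without exceeding it.
+ */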
+static int sg2042_pll_compute_postdiv(const struct sg2044_pll_limit *limits,
+ unsigned long target,
+ unsigned long parent_rate,
+ unsigned int refdiv,
+ unsigned int fbdiv,
+ unsigned int *postdiv1,
+ unsigned int *postdiv2)
+{
+ unsigned int div1, div2;
+ unsigned long tmp, best_rate = 0;
+ unsigned int best_div1 = 0, best_div2 = 0;
+
+ for_each_pll_limit_range(div2, &limits[PLL_LIMIT_POSTDIV2]) {
+ for_each_pll_limit_range(div1, &limits[PLL_LIMIT_POSTDIV1]) {
+ tmp = sg2044_pll_calc_rate(parent_rate,
+ refdiv, fbdiv,
+ div1, div2);
+
+ if (tmp > target)
+ continue;
+
+ if (pll_is_better_rate(target, tmp, best_rate)) {
+ best_div1 = div1;
+ best_div2 = div2;
+ best_rate = tmp;
+
+ if (tmp == target)
+ goto find;
+ }
+ }
+ }
+
+find:
+ if (best_rate) {
+ *postdiv1 = best_div1;
+ *postdiv2 = best_div2;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
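+/*
+ * Walk the FBDIV/REFDIV ranges (and, via the helper above, the POSTDIV
+ * ranges), keeping the combination whose output is closest to the
+ * requested rate while the VCO stays within its allowed band.
+ */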
+static int sg2044_compute_pll_setting(const struct sg2044_pll_limit *limits,
+ unsigned long req_rate,
+ unsigned long parent_rate,
+ unsigned int *value)
+{
+ unsigned int refdiv, fbdiv, postdiv1, postdiv2;
+ unsigned int best_refdiv, best_fbdiv, best_postdiv1, best_postdiv2;
+ unsigned long tmp, best_rate = 0;
+ int ret;
+
+ for_each_pll_limit_range(fbdiv, &limits[PLL_LIMIT_FBDIV]) {
+ for_each_pll_limit_range(refdiv, &limits[PLL_LIMIT_REFDIV]) {
+ u64 vco = sg2044_pll_calc_vco_rate(parent_rate,
+ refdiv, fbdiv);
+ if (!sg2044_clk_fit_limit(vco, &limits[PLL_LIMIT_FOUTVCO]))
+ continue;
+
+ ret = sg2042_pll_compute_postdiv(limits,
+ req_rate, parent_rate,
+ refdiv, fbdiv,
+ &postdiv1, &postdiv2);
+ if (ret)
+ continue;
+
+ tmp = sg2044_pll_calc_rate(parent_rate,
+ refdiv, fbdiv,
+ postdiv1, postdiv2);
+
+ if (pll_is_better_rate(req_rate, tmp, best_rate)) {
+ best_refdiv = refdiv;
+ best_fbdiv = fbdiv;
+ best_postdiv1 = postdiv1;
+ best_postdiv2 = postdiv2;
+ best_rate = tmp;
+
+ if (tmp == req_rate)
+ goto find;
+ }
+ }
+ }
+
+find:
+ if (best_rate) {
+ *value = FIELD_PREP(PLL_REFDIV_MASK, best_refdiv) |
+ FIELD_PREP(PLL_FBDIV_MASK, best_fbdiv) |
+ FIELD_PREP(PLL_POSTDIV1_MASK, best_postdiv1) |
+ FIELD_PREP(PLL_POSTDIV2_MASK, best_postdiv2);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int sg2044_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct sg2044_pll *pll = hw_to_sg2044_pll(hw);
+ unsigned int value;
+ u64 target;
+ int ret;
+
+ target = clamp(req->rate, pll->pll.limits[PLL_LIMIT_FOUT].min,
+ pll->pll.limits[PLL_LIMIT_FOUT].max);
+
+ ret = sg2044_compute_pll_setting(pll->pll.limits, target,
+ req->best_parent_rate, &value);
+ if (ret < 0)
+ return ret;
+
+ req->rate = sg2044_pll_calc_rate(req->best_parent_rate,
+ FIELD_GET(PLL_REFDIV_MASK, value),
+ FIELD_GET(PLL_FBDIV_MASK, value),
+ FIELD_GET(PLL_POSTDIV1_MASK, value),
+ FIELD_GET(PLL_POSTDIV2_MASK, value));
+
+ return 0;
+}
+
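+/* Wait for the PLL to report lock and for any pending update to complete. */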
+static int sg2044_pll_poll_update(struct sg2044_pll *pll)
+{
+ int ret;
+ unsigned int value;
+
+ ret = regmap_read_poll_timeout_atomic(pll->common.regmap,
+ pll->syscon_offset + pll->pll.status_offset,
+ value,
+ (value & BIT(pll->pll.status_lock_bit)),
+ 1, 100000);
+ if (ret)
+ return ret;
+
+ return regmap_read_poll_timeout_atomic(pll->common.regmap,
+ pll->syscon_offset + pll->pll.status_offset,
+ value,
+ (!(value & BIT(pll->pll.status_updating_bit))),
+ 1, 100000);
+}
+
+static int sg2044_pll_enable(struct sg2044_pll *pll, bool en)
+{
+ if (en) {
+ if (sg2044_pll_poll_update(pll) < 0)
+ pr_warn("%s: fail to lock pll\n", clk_hw_get_name(&pll->common.hw));
+
+ return regmap_set_bits(pll->common.regmap,
+ pll->syscon_offset + pll->pll.enable_offset,
+ BIT(pll->pll.enable_bit));
+ }
+
+ return regmap_clear_bits(pll->common.regmap,
+ pll->syscon_offset + pll->pll.enable_offset,
+ BIT(pll->pll.enable_bit));
+}
+
+static int sg2044_pll_update_vcosel(struct sg2044_pll *pll, u64 rate)
+{
+ unsigned int sel;
+
+ if (rate < U64_C(2400000000))
+ sel = PLL_VCOSEL_1G6;
+ else
+ sel = PLL_VCOSEL_2G4;
+
+ return regmap_write_bits(pll->common.regmap,
+ pll->syscon_offset + pll->pll.ctrl_offset,
+ PLL_VCOSEL_MASK,
+ FIELD_PREP(PLL_VCOSEL_MASK, sel));
+}
+
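+/*
+ * Reprogram the PLL with its output disabled: select the VCO band for
+ * the new VCO rate, write the dividers together with the calibration
+ * and update-enable bits, then re-enable the output once the PLL locks.
+ */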
+static int sg2044_pll_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct sg2044_pll *pll = hw_to_sg2044_pll(hw);
+ unsigned int value;
+ u64 vco;
+ int ret;
+
+ ret = sg2044_compute_pll_setting(pll->pll.limits, rate,
+ parent_rate, &value);
+ if (ret < 0)
+ return ret;
+
+ vco = sg2044_pll_calc_vco_rate(parent_rate,
+ FIELD_GET(PLL_REFDIV_MASK, value),
+ FIELD_GET(PLL_FBDIV_MASK, value));
+
+ value |= PLL_CALIBRATE_EN;
+ value |= PLL_CALIBRATE_DEFAULT;
+ value |= PLL_UPDATE_EN;
+
+ guard(spinlock_irqsave)(pll->common.lock);
+
+ ret = sg2044_pll_enable(pll, false);
+ if (ret)
+ return ret;
+
+ sg2044_pll_update_vcosel(pll, vco);
+
+ regmap_write_bits(pll->common.regmap,
+ pll->syscon_offset + pll->pll.ctrl_offset +
+ PLL_HIGH_CTRL_OFFSET,
+ PLL_HIGH_CTRL_MASK, value);
+
+ sg2044_pll_enable(pll, true);
+
+ return ret;
+}
+
+static const struct clk_ops sg2044_pll_ops = {
+ .recalc_rate = sg2044_pll_recalc_rate,
+ .determine_rate = sg2044_pll_determine_rate,
+ .set_rate = sg2044_pll_set_rate,
+};
+
+static const struct clk_ops sg2044_pll_ro_ops = {
+ .recalc_rate = sg2044_pll_recalc_rate,
+};
+
+#define SG2044_CLK_COMMON_PDATA(_id, _name, _parents, _op, _flags) \
+ { \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, _parents, \
+ _op, (_flags)), \
+ .id = (_id), \
+ }
+
+#define DEFINE_SG2044_PLL(_id, _name, _parent, _flags, \
+ _ctrl_offset, \
+ _status_offset, _status_lock_bit, \
+ _status_updating_bit, \
+ _enable_offset, _enable_bit, \
+ _limits) \
+ struct sg2044_pll _name = { \
+ .common = SG2044_CLK_COMMON_PDATA(_id, #_name, _parent, \
+ &sg2044_pll_ops, \
+ (_flags)), \
+ .pll = { \
+ .ctrl_offset = (_ctrl_offset), \
+ .status_offset = (_status_offset), \
+ .enable_offset = (_enable_offset), \
+ .status_lock_bit = (_status_lock_bit), \
+ .status_updating_bit = (_status_updating_bit), \
+ .enable_bit = (_enable_bit), \
+ .limits = (_limits), \
+ }, \
+ }
+
+#define DEFINE_SG2044_PLL_RO(_id, _name, _parent, _flags, \
+ _ctrl_offset, \
+ _status_offset, _status_lock_bit, \
+ _status_updating_bit, \
+ _enable_offset, _enable_bit, \
+ _limits) \
+ struct sg2044_pll _name = { \
+ .common = SG2044_CLK_COMMON_PDATA(_id, #_name, _parent, \
+ &sg2044_pll_ro_ops, \
+ (_flags)), \
+ .pll = { \
+ .ctrl_offset = (_ctrl_offset), \
+ .status_offset = (_status_offset), \
+ .enable_offset = (_enable_offset), \
+ .status_lock_bit = (_status_lock_bit), \
+ .status_updating_bit = (_status_updating_bit), \
+ .enable_bit = (_enable_bit), \
+ .limits = (_limits), \
+ }, \
+ }
+
+static const struct clk_parent_data osc_parents[] = {
+ { .index = 0 },
+};
+
+static const struct sg2044_pll_limit pll_limits[] = {
+ [PLL_LIMIT_FOUTVCO] = {
+ .min = U64_C(1600000000),
+ .max = U64_C(3200000000),
+ },
+ [PLL_LIMIT_FOUT] = {
+ .min = U64_C(25000),
+ .max = U64_C(3200000000),
+ },
+ [PLL_LIMIT_REFDIV] = {
+ .min = U64_C(1),
+ .max = U64_C(63),
+ },
+ [PLL_LIMIT_FBDIV] = {
+ .min = U64_C(8),
+ .max = U64_C(1066),
+ },
+ [PLL_LIMIT_POSTDIV1] = {
+ .min = U64_C(0),
+ .max = U64_C(7),
+ },
+ [PLL_LIMIT_POSTDIV2] = {
+ .min = U64_C(0),
+ .max = U64_C(7),
+ },
+};
+
+static DEFINE_SG2044_PLL_RO(CLK_FPLL0, clk_fpll0, osc_parents, CLK_IS_CRITICAL,
+ 0x58, 0x00, 22, 6,
+ 0x04, 6, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_FPLL1, clk_fpll1, osc_parents, CLK_IS_CRITICAL,
+ 0x60, 0x00, 23, 7,
+ 0x04, 7, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_FPLL2, clk_fpll2, osc_parents, CLK_IS_CRITICAL,
+ 0x20, 0x08, 16, 0,
+ 0x0c, 0, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL0, clk_dpll0, osc_parents, CLK_IS_CRITICAL,
+ 0x68, 0x00, 24, 8,
+ 0x04, 8, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL1, clk_dpll1, osc_parents, CLK_IS_CRITICAL,
+ 0x70, 0x00, 25, 9,
+ 0x04, 9, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL2, clk_dpll2, osc_parents, CLK_IS_CRITICAL,
+ 0x78, 0x00, 26, 10,
+ 0x04, 10, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL3, clk_dpll3, osc_parents, CLK_IS_CRITICAL,
+ 0x80, 0x00, 27, 11,
+ 0x04, 11, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL4, clk_dpll4, osc_parents, CLK_IS_CRITICAL,
+ 0x88, 0x00, 28, 12,
+ 0x04, 12, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL5, clk_dpll5, osc_parents, CLK_IS_CRITICAL,
+ 0x90, 0x00, 29, 13,
+ 0x04, 13, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL6, clk_dpll6, osc_parents, CLK_IS_CRITICAL,
+ 0x98, 0x00, 30, 14,
+ 0x04, 14, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL7, clk_dpll7, osc_parents, CLK_IS_CRITICAL,
+ 0xa0, 0x00, 31, 15,
+ 0x04, 15, pll_limits);
+
+static DEFINE_SG2044_PLL(CLK_MPLL0, clk_mpll0, osc_parents, CLK_IS_CRITICAL,
+ 0x28, 0x00, 16, 0,
+ 0x04, 0, pll_limits);
+
+static DEFINE_SG2044_PLL(CLK_MPLL1, clk_mpll1, osc_parents, CLK_IS_CRITICAL,
+ 0x30, 0x00, 17, 1,
+ 0x04, 1, pll_limits);
+
+static DEFINE_SG2044_PLL(CLK_MPLL2, clk_mpll2, osc_parents, CLK_IS_CRITICAL,
+ 0x38, 0x00, 18, 2,
+ 0x04, 2, pll_limits);
+
+static DEFINE_SG2044_PLL(CLK_MPLL3, clk_mpll3, osc_parents, CLK_IS_CRITICAL,
+ 0x40, 0x00, 19, 3,
+ 0x04, 3, pll_limits);
+
+static DEFINE_SG2044_PLL(CLK_MPLL4, clk_mpll4, osc_parents, CLK_IS_CRITICAL,
+ 0x48, 0x00, 20, 4,
+ 0x04, 4, pll_limits);
+
+static DEFINE_SG2044_PLL(CLK_MPLL5, clk_mpll5, osc_parents, CLK_IS_CRITICAL,
+ 0x50, 0x00, 21, 5,
+ 0x04, 5, pll_limits);
+
+static struct sg2044_clk_common * const sg2044_pll_commons[] = {
+ &clk_fpll0.common,
+ &clk_fpll1.common,
+ &clk_fpll2.common,
+ &clk_dpll0.common,
+ &clk_dpll1.common,
+ &clk_dpll2.common,
+ &clk_dpll3.common,
+ &clk_dpll4.common,
+ &clk_dpll5.common,
+ &clk_dpll6.common,
+ &clk_dpll7.common,
+ &clk_mpll0.common,
+ &clk_mpll1.common,
+ &clk_mpll2.common,
+ &clk_mpll3.common,
+ &clk_mpll4.common,
+ &clk_mpll5.common,
+};
+
+static int sg2044_pll_init_ctrl(struct device *dev, struct regmap *regmap,
+ struct sg2044_pll_ctrl *ctrl,
+ const struct sg2044_pll_desc_data *desc)
+{
+ int ret, i;
+
+ spin_lock_init(&ctrl->lock);
+
+ for (i = 0; i < desc->num_pll; i++) {
+ struct sg2044_clk_common *common = desc->pll[i];
+ struct sg2044_pll *pll = hw_to_sg2044_pll(&common->hw);
+
+ common->lock = &ctrl->lock;
+ common->regmap = regmap;
+ pll->syscon_offset = SG2044_SYSCON_PLL_OFFSET;
+
+ ret = devm_clk_hw_register(dev, &common->hw);
+ if (ret)
+ return ret;
+
+ ctrl->data.hws[common->id] = &common->hw;
+ }
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ &ctrl->data);
+}
+
+static int sg2044_pll_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sg2044_pll_ctrl *ctrl;
+ const struct sg2044_pll_desc_data *desc;
+ struct regmap *regmap;
+
+ regmap = device_node_to_regmap(pdev->dev.parent->of_node);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap),
+ "fail to get the regmap for PLL\n");
+
+ desc = (const struct sg2044_pll_desc_data *)platform_get_device_id(pdev)->driver_data;
+ if (!desc)
+ return dev_err_probe(dev, -EINVAL, "no match data for platform\n");
+
+ ctrl = devm_kzalloc(dev, struct_size(ctrl, data.hws, desc->num_pll), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ ctrl->data.num = desc->num_pll;
+
+ return sg2044_pll_init_ctrl(dev, regmap, ctrl, desc);
+}
+
+static const struct sg2044_pll_desc_data sg2044_pll_desc_data = {
+ .pll = sg2044_pll_commons,
+ .num_pll = ARRAY_SIZE(sg2044_pll_commons),
+};
+
+static const struct platform_device_id sg2044_pll_match[] = {
+ { .name = "sg2044-pll",
+ .driver_data = (unsigned long)&sg2044_pll_desc_data },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, sg2044_pll_match);
+
+static struct platform_driver sg2044_clk_driver = {
+ .probe = sg2044_pll_probe,
+ .driver = {
+ .name = "sg2044-pll",
+ },
+ .id_table = sg2044_pll_match,
+};
+module_platform_driver(sg2044_clk_driver);
+
+MODULE_AUTHOR("Inochi Amaoto <inochiama@gmail.com>");
+MODULE_DESCRIPTION("Sophgo SG2044 PLL clock driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sophgo/clk-sg2044.c b/drivers/clk/sophgo/clk-sg2044.c
new file mode 100644
index 000000000000..f67f99c926b6
--- /dev/null
+++ b/drivers/clk/sophgo/clk-sg2044.c
@@ -0,0 +1,1812 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sophgo SG2044 clock controller driver
+ *
+ * Copyright (C) 2025 Inochi Amaoto <inochiama@gmail.com>
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/math64.h>
+#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+
+#include <dt-bindings/clock/sophgo,sg2044-clk.h>
+
+#define DIV_ASSERT BIT(0)
+#define DIV_FACTOR_REG_SOURCE BIT(3)
+#define DIV_BRANCH_EN BIT(4)
+
+#define DIV_ASSERT_TIME 2
+
+struct sg2044_div_internal {
+ u32 offset;
+ u32 initval;
+ u8 shift;
+ u8 width;
+ u16 flags;
+};
+
+struct sg2044_mux_internal {
+ const u32 *table;
+ u32 offset;
+ u16 shift;
+ u16 flags;
+};
+
+struct sg2044_gate_internal {
+ u32 offset;
+ u16 shift;
+ u16 flags;
+};
+
+struct sg2044_clk_common {
+ struct clk_hw hw;
+ void __iomem *base;
+ spinlock_t *lock;
+ unsigned int id;
+};
+
+struct sg2044_div {
+ struct sg2044_clk_common common;
+ struct sg2044_div_internal div;
+};
+
+struct sg2044_mux {
+ struct sg2044_clk_common common;
+ struct sg2044_mux_internal mux;
+ struct notifier_block nb;
+ u8 saved_parent;
+};
+
+struct sg2044_gate {
+ struct sg2044_clk_common common;
+ struct sg2044_gate_internal gate;
+};
+
+struct sg2044_clk_ctrl {
+ spinlock_t lock;
+ struct clk_hw_onecell_data data;
+};
+
+struct sg2044_clk_desc_data {
+ struct sg2044_clk_common * const *pll;
+ struct sg2044_clk_common * const *div;
+ struct sg2044_clk_common * const *mux;
+ struct sg2044_clk_common * const *gate;
+ u16 num_pll;
+ u16 num_div;
+ u16 num_mux;
+ u16 num_gate;
+};
+
+#define hw_to_sg2044_clk_common(_hw) \
+ container_of((_hw), struct sg2044_clk_common, hw)
+
+static inline struct sg2044_div *hw_to_sg2044_div(struct clk_hw *hw)
+{
+ return container_of(hw_to_sg2044_clk_common(hw),
+ struct sg2044_div, common);
+}
+
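+/*
+ * The divider factor comes from the register field only when
+ * DIV_FACTOR_REG_SOURCE is set; otherwise the hardware default applies,
+ * mirrored here by 'initval' (0 meaning a factor of 1).
+ */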
+static u32 sg2044_div_get_reg_div(u32 reg, struct sg2044_div_internal *div)
+{
+ if ((reg & DIV_FACTOR_REG_SOURCE))
+ return (reg >> div->shift) & clk_div_mask(div->width);
+
+ return div->initval == 0 ? 1 : div->initval;
+}
+
+static unsigned long _sg2044_div_recalc_rate(struct sg2044_clk_common *common,
+ struct sg2044_div_internal *div,
+ unsigned long parent_rate)
+{
+ u32 reg = readl(common->base + div->offset);
+ u32 val = sg2044_div_get_reg_div(reg, div);
+
+ return divider_recalc_rate(&common->hw, parent_rate, val, NULL,
+ div->flags, div->width);
+}
+
+static unsigned long sg2044_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct sg2044_div *div = hw_to_sg2044_div(hw);
+
+ return _sg2044_div_recalc_rate(&div->common, &div->div,
+ parent_rate);
+}
+
+static int _sg2044_div_determine_rate(struct sg2044_clk_common *common,
+ struct sg2044_div_internal *div,
+ struct clk_rate_request *req)
+{
+ if (div->flags & CLK_DIVIDER_READ_ONLY) {
+ u32 reg = readl(common->base + div->offset);
+ u32 val = sg2044_div_get_reg_div(reg, div);
+
+ return divider_ro_determine_rate(&common->hw, req, NULL,
+ div->width, div->flags,
+ val);
+ }
+
+ return divider_determine_rate(&common->hw, req, NULL,
+ div->width, div->flags);
+}
+
+static int sg2044_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct sg2044_div *div = hw_to_sg2044_div(hw);
+
+ return _sg2044_div_determine_rate(&div->common, &div->div, req);
+}
+
+static void sg2044_div_set_reg_div(struct sg2044_clk_common *common,
+ struct sg2044_div_internal *div,
+ u32 value)
+{
+ void __iomem *addr = common->base + div->offset;
+ u32 reg;
+
+ reg = readl(addr);
+
+ /* assert */
+ reg &= ~DIV_ASSERT;
+ writel(reg, addr);
+
+ /* set value */
+ reg = readl(addr);
+ reg &= ~(clk_div_mask(div->width) << div->shift);
+ reg |= (value << div->shift) | DIV_FACTOR_REG_SOURCE;
+ writel(reg, addr);
+
+ /* de-assert */
+ reg |= DIV_ASSERT;
+ writel(reg, addr);
+}
+
+static int sg2044_div_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct sg2044_div *div = hw_to_sg2044_div(hw);
+ u32 value;
+
+ value = divider_get_val(rate, parent_rate, NULL,
+ div->div.width, div->div.flags);
+
+ guard(spinlock_irqsave)(div->common.lock);
+
+ sg2044_div_set_reg_div(&div->common, &div->div, value);
+
+ return 0;
+}
+
+static int sg2044_div_enable(struct clk_hw *hw)
+{
+ struct sg2044_div *div = hw_to_sg2044_div(hw);
+ void __iomem *addr = div->common.base + div->div.offset;
+ u32 value;
+
+ guard(spinlock_irqsave)(div->common.lock);
+
+ value = readl(addr);
+ value |= DIV_BRANCH_EN;
+ writel(value, addr);
+
+ return 0;
+}
+
+static void sg2044_div_disable(struct clk_hw *hw)
+{
+ struct sg2044_div *div = hw_to_sg2044_div(hw);
+ void __iomem *addr = div->common.base + div->div.offset;
+ u32 value;
+
+ guard(spinlock_irqsave)(div->common.lock);
+
+ value = readl(addr);
+ value &= ~DIV_BRANCH_EN;
+ writel(value, addr);
+}
+
+static int sg2044_div_is_enabled(struct clk_hw *hw)
+{
+ struct sg2044_div *div = hw_to_sg2044_div(hw);
+
+ return readl(div->common.base + div->div.offset) & DIV_BRANCH_EN;
+}
+
+static const struct clk_ops sg2044_gateable_div_ops = {
+ .enable = sg2044_div_enable,
+ .disable = sg2044_div_disable,
+ .is_enabled = sg2044_div_is_enabled,
+ .recalc_rate = sg2044_div_recalc_rate,
+ .determine_rate = sg2044_div_determine_rate,
+ .set_rate = sg2044_div_set_rate,
+};
+
+static const struct clk_ops sg2044_div_ops = {
+ .recalc_rate = sg2044_div_recalc_rate,
+ .determine_rate = sg2044_div_determine_rate,
+ .set_rate = sg2044_div_set_rate,
+};
+
+static const struct clk_ops sg2044_div_ro_ops = {
+ .recalc_rate = sg2044_div_recalc_rate,
+ .determine_rate = sg2044_div_determine_rate,
+};
+
+static inline struct sg2044_mux *hw_to_sg2044_mux(struct clk_hw *hw)
+{
+ return container_of(hw_to_sg2044_clk_common(hw),
+ struct sg2044_mux, common);
+}
+
+static inline struct sg2044_mux *nb_to_sg2044_mux(struct notifier_block *nb)
+{
+ return container_of(nb, struct sg2044_mux, nb);
+}
+
+static const u32 sg2044_mux_table[] = {0, 1};
+
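+/*
+ * While the rate of a mux's main parent is changing, temporarily switch
+ * to parent 0 (the fixed divider path listed first in each parent table)
+ * and restore the original parent once the change has completed.
+ */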
+static int sg2044_mux_notifier_cb(struct notifier_block *nb,
+ unsigned long event,
+ void *data)
+{
+ struct sg2044_mux *mux = nb_to_sg2044_mux(nb);
+ const struct clk_ops *ops = &clk_mux_ops;
+ struct clk_notifier_data *ndata = data;
+ struct clk_hw *hw = __clk_get_hw(ndata->clk);
+ int ret = 0;
+
+ if (event == PRE_RATE_CHANGE) {
+ mux->saved_parent = ops->get_parent(hw);
+ if (mux->saved_parent)
+ ret = ops->set_parent(hw, 0);
+ } else if (event == POST_RATE_CHANGE) {
+ ret = ops->set_parent(hw, mux->saved_parent);
+ }
+
+ return notifier_from_errno(ret);
+}
+
+static inline struct sg2044_gate *hw_to_sg2044_gate(struct clk_hw *hw)
+{
+ return container_of(hw_to_sg2044_clk_common(hw),
+ struct sg2044_gate, common);
+}
+
+#define SG2044_CLK_COMMON_PDATA(_id, _name, _parents, _op, _flags) \
+ { \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, _parents, \
+ _op, (_flags)), \
+ .id = (_id), \
+ }
+
+#define SG2044_CLK_COMMON_PHWS(_id, _name, _parents, _op, _flags) \
+ { \
+ .hw.init = CLK_HW_INIT_PARENTS_HW(_name, _parents, \
+ _op, (_flags)), \
+ .id = (_id), \
+ }
+
+#define DEFINE_SG2044_GATEABLE_DIV(_id, _name, _parent, _flags, \
+ _div_offset, _div_shift, _div_width, \
+ _div_flags, _div_initval) \
+ struct sg2044_div _name = { \
+ .common = SG2044_CLK_COMMON_PDATA(_id, #_name, _parent, \
+ &sg2044_gateable_div_ops,\
+ (_flags)), \
+ .div = { \
+ .offset = (_div_offset), \
+ .initval = (_div_initval), \
+ .shift = (_div_shift), \
+ .width = (_div_width), \
+ .flags = (_div_flags), \
+ }, \
+ }
+
+#define DEFINE_SG2044_DIV(_id, _name, _parent, _flags, \
+ _div_offset, _div_shift, _div_width, \
+ _div_flags, _div_initval) \
+ struct sg2044_div _name = { \
+ .common = SG2044_CLK_COMMON_PHWS(_id, #_name, _parent, \
+ &sg2044_div_ops, \
+ (_flags)), \
+ .div = { \
+ .offset = (_div_offset), \
+ .initval = (_div_initval), \
+ .shift = (_div_shift), \
+ .width = (_div_width), \
+ .flags = (_div_flags), \
+ }, \
+ }
+
+#define DEFINE_SG2044_DIV_PDATA(_id, _name, _parent, _flags, \
+ _div_offset, _div_shift, _div_width, \
+ _div_flags, _div_initval) \
+ struct sg2044_div _name = { \
+ .common = SG2044_CLK_COMMON_PDATA(_id, #_name, _parent, \
+ &sg2044_div_ops, \
+ (_flags)), \
+ .div = { \
+ .offset = (_div_offset), \
+ .initval = (_div_initval), \
+ .shift = (_div_shift), \
+ .width = (_div_width), \
+ .flags = (_div_flags), \
+ }, \
+ }
+
+#define DEFINE_SG2044_DIV_RO(_id, _name, _parent, _flags, \
+ _div_offset, _div_shift, _div_width, \
+ _div_flags, _div_initval) \
+ struct sg2044_div _name = { \
+ .common = SG2044_CLK_COMMON_PDATA(_id, #_name, _parent, \
+ &sg2044_div_ro_ops, \
+ (_flags)), \
+ .div = { \
+ .offset = (_div_offset), \
+ .initval = (_div_initval), \
+ .shift = (_div_shift), \
+ .width = (_div_width), \
+ .flags = (_div_flags) | CLK_DIVIDER_READ_ONLY,\
+ }, \
+ }
+
+#define DEFINE_SG2044_MUX(_id, _name, _parent, _flags, \
+ _mux_offset, _mux_shift, \
+ _mux_table, _mux_flags) \
+ struct sg2044_mux _name = { \
+ .common = SG2044_CLK_COMMON_PDATA(_id, #_name, _parent, \
+ &clk_mux_ops, (_flags)),\
+ .mux = { \
+ .table = (_mux_table), \
+ .offset = (_mux_offset), \
+ .shift = (_mux_shift), \
+ .flags = (_mux_flags), \
+ }, \
+ }
+
+#define DEFINE_SG2044_GATE(_id, _name, _parent, _flags, \
+ _gate_offset, _gate_shift, _gate_flags) \
+ struct sg2044_gate _name = { \
+ .common = SG2044_CLK_COMMON_PHWS(_id, #_name, _parent, \
+ &clk_gate_ops, (_flags)),\
+ .gate = { \
+ .offset = (_gate_offset), \
+ .shift = (_gate_shift), \
+ .flags = (_gate_flags), \
+ }, \
+ }
+
+static const struct clk_parent_data clk_fpll0_parent[] = {
+ { .fw_name = "fpll0" },
+};
+
+static const struct clk_parent_data clk_fpll1_parent[] = {
+ { .fw_name = "fpll1" },
+};
+
+static const struct clk_parent_data clk_fpll2_parent[] = {
+ { .fw_name = "fpll2" },
+};
+
+static const struct clk_parent_data clk_dpll0_parent[] = {
+ { .fw_name = "dpll0" },
+};
+
+static const struct clk_parent_data clk_dpll1_parent[] = {
+ { .fw_name = "dpll1" },
+};
+
+static const struct clk_parent_data clk_dpll2_parent[] = {
+ { .fw_name = "dpll2" },
+};
+
+static const struct clk_parent_data clk_dpll3_parent[] = {
+ { .fw_name = "dpll3" },
+};
+
+static const struct clk_parent_data clk_dpll4_parent[] = {
+ { .fw_name = "dpll4" },
+};
+
+static const struct clk_parent_data clk_dpll5_parent[] = {
+ { .fw_name = "dpll5" },
+};
+
+static const struct clk_parent_data clk_dpll6_parent[] = {
+ { .fw_name = "dpll6" },
+};
+
+static const struct clk_parent_data clk_dpll7_parent[] = {
+ { .fw_name = "dpll7" },
+};
+
+static const struct clk_parent_data clk_mpll0_parent[] = {
+ { .fw_name = "mpll0" },
+};
+
+static const struct clk_parent_data clk_mpll1_parent[] = {
+ { .fw_name = "mpll1" },
+};
+
+static const struct clk_parent_data clk_mpll2_parent[] = {
+ { .fw_name = "mpll2" },
+};
+
+static const struct clk_parent_data clk_mpll3_parent[] = {
+ { .fw_name = "mpll3" },
+};
+
+static const struct clk_parent_data clk_mpll4_parent[] = {
+ { .fw_name = "mpll4" },
+};
+
+static const struct clk_parent_data clk_mpll5_parent[] = {
+ { .fw_name = "mpll5" },
+};
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_AP_SYS_FIXED, clk_div_ap_sys_fixed,
+ clk_fpll0_parent, 0,
+ 0x044, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_AP_SYS_MAIN, clk_div_ap_sys_main,
+ clk_mpll0_parent, 0,
+ 0x040, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_RP_SYS_FIXED, clk_div_rp_sys_fixed,
+ clk_fpll0_parent, 0,
+ 0x050, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_RP_SYS_MAIN, clk_div_rp_sys_main,
+ clk_mpll1_parent, 0,
+ 0x04c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_TPU_SYS_FIXED, clk_div_tpu_sys_fixed,
+ clk_fpll0_parent, 0,
+ 0x058, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 2);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_TPU_SYS_MAIN, clk_div_tpu_sys_main,
+ clk_mpll2_parent, 0,
+ 0x054, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_NOC_SYS_FIXED, clk_div_noc_sys_fixed,
+ clk_fpll0_parent, 0,
+ 0x070, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_NOC_SYS_MAIN, clk_div_noc_sys_main,
+ clk_mpll3_parent, 0,
+ 0x06c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_VC_SRC0_FIXED, clk_div_vc_src0_fixed,
+ clk_fpll0_parent, 0,
+ 0x078, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 2);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_VC_SRC0_MAIN, clk_div_vc_src0_main,
+ clk_mpll4_parent, 0,
+ 0x074, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_VC_SRC1_FIXED, clk_div_vc_src1_fixed,
+ clk_fpll0_parent, 0,
+ 0x080, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 3);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_VC_SRC1_MAIN, clk_div_vc_src1_main,
+ clk_mpll5_parent, 0,
+ 0x07c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_CXP_MAC_FIXED, clk_div_cxp_mac_fixed,
+ clk_fpll0_parent, 0,
+ 0x088, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 2);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_CXP_MAC_MAIN, clk_div_cxp_mac_main,
+ clk_fpll1_parent, 0,
+ 0x084, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR0_FIXED, clk_div_ddr0_fixed,
+ clk_fpll0_parent, 0,
+ 0x124, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR0_MAIN, clk_div_ddr0_main,
+ clk_dpll0_parent, 0,
+ 0x120, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR1_FIXED, clk_div_ddr1_fixed,
+ clk_fpll0_parent, 0,
+ 0x12c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR1_MAIN, clk_div_ddr1_main,
+ clk_dpll1_parent, 0,
+ 0x128, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR2_FIXED, clk_div_ddr2_fixed,
+ clk_fpll0_parent, 0,
+ 0x134, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR2_MAIN, clk_div_ddr2_main,
+ clk_dpll2_parent, 0,
+ 0x130, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR3_FIXED, clk_div_ddr3_fixed,
+ clk_fpll0_parent, 0,
+ 0x13c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR3_MAIN, clk_div_ddr3_main,
+ clk_dpll3_parent, 0,
+ 0x138, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR4_FIXED, clk_div_ddr4_fixed,
+ clk_fpll0_parent, 0,
+ 0x144, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR4_MAIN, clk_div_ddr4_main,
+ clk_dpll4_parent, 0,
+ 0x140, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR5_FIXED, clk_div_ddr5_fixed,
+ clk_fpll0_parent, 0,
+ 0x14c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR5_MAIN, clk_div_ddr5_main,
+ clk_dpll5_parent, 0,
+ 0x148, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR6_FIXED, clk_div_ddr6_fixed,
+ clk_fpll0_parent, 0,
+ 0x154, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR6_MAIN, clk_div_ddr6_main,
+ clk_dpll6_parent, 0,
+ 0x150, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR7_FIXED, clk_div_ddr7_fixed,
+ clk_fpll0_parent, 0,
+ 0x15c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR7_MAIN, clk_div_ddr7_main,
+ clk_dpll7_parent, 0,
+ 0x158, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_TOP_50M, clk_div_top_50m,
+ clk_fpll0_parent, 0,
+ 0x048, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 40);
+
+static const struct clk_hw *clk_div_top_50m_parent[] = {
+ &clk_div_top_50m.common.hw,
+};
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_TOP_AXI0, clk_div_top_axi0,
+ clk_fpll0_parent, 0,
+ 0x118, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 20);
+
+static const struct clk_hw *clk_div_top_axi0_parent[] = {
+ &clk_div_top_axi0.common.hw,
+};
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_TOP_AXI_HSPERI, clk_div_top_axi_hsperi,
+ clk_fpll0_parent, 0,
+ 0x11c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 8);
+
+static const struct clk_hw *clk_div_top_axi_hsperi_parent[] = {
+ &clk_div_top_axi_hsperi.common.hw,
+};
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER0, clk_div_timer0,
+ clk_div_top_50m_parent, 0,
+ 0x0d0, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER1, clk_div_timer1,
+ clk_div_top_50m_parent, 0,
+ 0x0d4, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER2, clk_div_timer2,
+ clk_div_top_50m_parent, 0,
+ 0x0d8, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER3, clk_div_timer3,
+ clk_div_top_50m_parent, 0,
+ 0x0dc, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER4, clk_div_timer4,
+ clk_div_top_50m_parent, 0,
+ 0x0e0, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER5, clk_div_timer5,
+ clk_div_top_50m_parent, 0,
+ 0x0e4, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER6, clk_div_timer6,
+ clk_div_top_50m_parent, 0,
+ 0x0e8, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER7, clk_div_timer7,
+ clk_div_top_50m_parent, 0,
+ 0x0ec, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_CXP_TEST_PHY, clk_div_cxp_test_phy,
+ clk_fpll0_parent, 0,
+ 0x064, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_CXP_TEST_ETH_PHY, clk_div_cxp_test_eth_phy,
+ clk_fpll2_parent, 0,
+ 0x068, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_C2C0_TEST_PHY, clk_div_c2c0_test_phy,
+ clk_fpll0_parent, 0,
+ 0x05c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_C2C1_TEST_PHY, clk_div_c2c1_test_phy,
+ clk_fpll0_parent, 0,
+ 0x060, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_PCIE_1G, clk_div_pcie_1g,
+ clk_fpll1_parent, 0,
+ 0x160, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_UART_500M, clk_div_uart_500m,
+ clk_fpll0_parent, 0,
+ 0x0cc, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 4);
+
+static DEFINE_SG2044_DIV(CLK_DIV_GPIO_DB, clk_div_gpio_db,
+ clk_div_top_axi0_parent, 0,
+ 0x0f8, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1000);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_SD, clk_div_sd,
+ clk_fpll0_parent, 0,
+ 0x110, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 5);
+
+static DEFINE_SG2044_DIV(CLK_DIV_SD_100K, clk_div_sd_100k,
+ clk_div_top_axi0_parent, 0,
+ 0x114, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1000);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_EMMC, clk_div_emmc,
+ clk_fpll0_parent, 0,
+ 0x108, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 5);
+
+static DEFINE_SG2044_DIV(CLK_DIV_EMMC_100K, clk_div_emmc_100k,
+ clk_div_top_axi0_parent, 0,
+ 0x10c, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1000);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_EFUSE, clk_div_efuse,
+ clk_fpll0_parent, 0,
+ 0x0f4, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 80);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_TX_ETH0, clk_div_tx_eth0,
+ clk_fpll0_parent, 0,
+ 0x0fc, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 16);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_PTP_REF_I_ETH0, clk_div_ptp_ref_i_eth0,
+ clk_fpll0_parent, 0,
+ 0x100, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 40);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_REF_ETH0, clk_div_ref_eth0,
+ clk_fpll0_parent, 0,
+ 0x104, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 80);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_PKA, clk_div_pka,
+ clk_fpll0_parent, 0,
+ 0x0f0, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static const struct clk_parent_data clk_mux_ddr0_parents[] = {
+ { .hw = &clk_div_ddr0_fixed.common.hw },
+ { .hw = &clk_div_ddr0_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR0, clk_mux_ddr0,
+ clk_mux_ddr0_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 7, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_ddr1_parents[] = {
+ { .hw = &clk_div_ddr1_fixed.common.hw },
+ { .hw = &clk_div_ddr1_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR1, clk_mux_ddr1,
+ clk_mux_ddr1_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 8, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_ddr2_parents[] = {
+ { .hw = &clk_div_ddr2_fixed.common.hw },
+ { .hw = &clk_div_ddr2_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR2, clk_mux_ddr2,
+ clk_mux_ddr2_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 9, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_ddr3_parents[] = {
+ { .hw = &clk_div_ddr3_fixed.common.hw },
+ { .hw = &clk_div_ddr3_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR3, clk_mux_ddr3,
+ clk_mux_ddr3_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 10, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_ddr4_parents[] = {
+ { .hw = &clk_div_ddr4_fixed.common.hw },
+ { .hw = &clk_div_ddr4_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR4, clk_mux_ddr4,
+ clk_mux_ddr4_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 11, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_ddr5_parents[] = {
+ { .hw = &clk_div_ddr5_fixed.common.hw },
+ { .hw = &clk_div_ddr5_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR5, clk_mux_ddr5,
+ clk_mux_ddr5_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 12, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_ddr6_parents[] = {
+ { .hw = &clk_div_ddr6_fixed.common.hw },
+ { .hw = &clk_div_ddr6_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR6, clk_mux_ddr6,
+ clk_mux_ddr6_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 13, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_ddr7_parents[] = {
+ { .hw = &clk_div_ddr7_fixed.common.hw },
+ { .hw = &clk_div_ddr7_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR7, clk_mux_ddr7,
+ clk_mux_ddr7_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 14, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_noc_sys_parents[] = {
+ { .hw = &clk_div_noc_sys_fixed.common.hw },
+ { .hw = &clk_div_noc_sys_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_NOC_SYS, clk_mux_noc_sys,
+ clk_mux_noc_sys_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 3, sg2044_mux_table, 0);
+
+static const struct clk_parent_data clk_mux_tpu_sys_parents[] = {
+ { .hw = &clk_div_tpu_sys_fixed.common.hw },
+ { .hw = &clk_div_tpu_sys_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_TPU_SYS, clk_mux_tpu_sys,
+ clk_mux_tpu_sys_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 2, sg2044_mux_table, 0);
+
+static const struct clk_parent_data clk_mux_rp_sys_parents[] = {
+ { .hw = &clk_div_rp_sys_fixed.common.hw },
+ { .hw = &clk_div_rp_sys_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_RP_SYS, clk_mux_rp_sys,
+ clk_mux_rp_sys_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 1, sg2044_mux_table, 0);
+
+static const struct clk_parent_data clk_mux_ap_sys_parents[] = {
+ { .hw = &clk_div_ap_sys_fixed.common.hw },
+ { .hw = &clk_div_ap_sys_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_AP_SYS, clk_mux_ap_sys,
+ clk_mux_ap_sys_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 0, sg2044_mux_table, 0);
+
+static const struct clk_parent_data clk_mux_vc_src0_parents[] = {
+ { .hw = &clk_div_vc_src0_fixed.common.hw },
+ { .hw = &clk_div_vc_src0_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_VC_SRC0, clk_mux_vc_src0,
+ clk_mux_vc_src0_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 4, sg2044_mux_table, 0);
+
+static const struct clk_parent_data clk_mux_vc_src1_parents[] = {
+ { .hw = &clk_div_vc_src1_fixed.common.hw },
+ { .hw = &clk_div_vc_src1_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_VC_SRC1, clk_mux_vc_src1,
+ clk_mux_vc_src1_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 5, sg2044_mux_table, 0);
+
+static const struct clk_parent_data clk_mux_cxp_mac_parents[] = {
+ { .hw = &clk_div_cxp_mac_fixed.common.hw },
+ { .hw = &clk_div_cxp_mac_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_CXP_MAC, clk_mux_cxp_mac,
+ clk_mux_cxp_mac_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 6, sg2044_mux_table, 0);
+
+static const struct clk_hw *clk_gate_ap_sys_parent[] = {
+ &clk_mux_ap_sys.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_AP_SYS, clk_gate_ap_sys,
+ clk_gate_ap_sys_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 0, 0);
+
+static const struct clk_hw *clk_gate_rp_sys_parent[] = {
+ &clk_mux_rp_sys.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_RP_SYS, clk_gate_rp_sys,
+ clk_gate_rp_sys_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 2, 0);
+
+static const struct clk_hw *clk_gate_tpu_sys_parent[] = {
+ &clk_mux_tpu_sys.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TPU_SYS, clk_gate_tpu_sys,
+ clk_gate_tpu_sys_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 3, 0);
+
+static const struct clk_hw *clk_gate_noc_sys_parent[] = {
+ &clk_mux_noc_sys.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_NOC_SYS, clk_gate_noc_sys,
+ clk_gate_noc_sys_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 8, 0);
+
+static const struct clk_hw *clk_gate_vc_src0_parent[] = {
+ &clk_mux_vc_src0.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_VC_SRC0, clk_gate_vc_src0,
+ clk_gate_vc_src0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 9, 0);
+
+static const struct clk_hw *clk_gate_vc_src1_parent[] = {
+ &clk_mux_vc_src1.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_VC_SRC1, clk_gate_vc_src1,
+ clk_gate_vc_src1_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 10, 0);
+
+static const struct clk_hw *clk_gate_ddr0_parent[] = {
+ &clk_mux_ddr0.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR0, clk_gate_ddr0,
+ clk_gate_ddr0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 7, 0);
+
+static const struct clk_hw *clk_gate_ddr1_parent[] = {
+ &clk_mux_ddr1.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR1, clk_gate_ddr1,
+ clk_gate_ddr1_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 8, 0);
+
+static const struct clk_hw *clk_gate_ddr2_parent[] = {
+ &clk_mux_ddr2.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR2, clk_gate_ddr2,
+ clk_gate_ddr2_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 9, 0);
+
+static const struct clk_hw *clk_gate_ddr3_parent[] = {
+ &clk_mux_ddr3.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR3, clk_gate_ddr3,
+ clk_gate_ddr3_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 10, 0);
+
+static const struct clk_hw *clk_gate_ddr4_parent[] = {
+ &clk_mux_ddr4.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR4, clk_gate_ddr4,
+ clk_gate_ddr4_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 11, 0);
+
+static const struct clk_hw *clk_gate_ddr5_parent[] = {
+ &clk_mux_ddr5.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR5, clk_gate_ddr5,
+ clk_gate_ddr5_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 12, 0);
+
+static const struct clk_hw *clk_gate_ddr6_parent[] = {
+ &clk_mux_ddr6.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR6, clk_gate_ddr6,
+ clk_gate_ddr6_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 13, 0);
+
+static const struct clk_hw *clk_gate_ddr7_parent[] = {
+ &clk_mux_ddr7.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR7, clk_gate_ddr7,
+ clk_gate_ddr7_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 14, 0);
+
+static const struct clk_hw *clk_gate_top_50m_parent[] = {
+ &clk_div_top_50m.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TOP_50M, clk_gate_top_50m,
+ clk_gate_top_50m_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 1, 0);
+
+static const struct clk_hw *clk_gate_sc_rx_parent[] = {
+ &clk_div_top_50m.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_SC_RX, clk_gate_sc_rx,
+ clk_gate_sc_rx_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 12, 0);
+
+static const struct clk_hw *clk_gate_sc_rx_x0y1_parent[] = {
+ &clk_div_top_50m.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_SC_RX_X0Y1, clk_gate_sc_rx_x0y1,
+ clk_gate_sc_rx_x0y1_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 13, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_TOP_AXI0, clk_gate_top_axi0,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 5, 0);
+
+static const struct clk_hw *clk_gate_mailbox_intc_parent[] = {
+ &clk_gate_top_axi0.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_INTC0, clk_gate_intc0,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 20, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_INTC1, clk_gate_intc1,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 21, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_INTC2, clk_gate_intc2,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 22, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_INTC3, clk_gate_intc3,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 23, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_MAILBOX0, clk_gate_mailbox0,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 16, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_MAILBOX1, clk_gate_mailbox1,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 17, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_MAILBOX2, clk_gate_mailbox2,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 18, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_MAILBOX3, clk_gate_mailbox3,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 19, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_TOP_AXI_HSPERI, clk_gate_top_axi_hsperi,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 6, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_TIMER, clk_gate_apb_timer,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 7, 0);
+
+static const struct clk_hw *clk_gate_timer0_parent[] = {
+ &clk_div_timer0.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER0, clk_gate_timer0,
+ clk_gate_timer0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 8, 0);
+
+static const struct clk_hw *clk_gate_timer1_parent[] = {
+ &clk_div_timer1.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER1, clk_gate_timer1,
+ clk_gate_timer1_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 9, 0);
+
+static const struct clk_hw *clk_gate_timer2_parent[] = {
+ &clk_div_timer2.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER2, clk_gate_timer2,
+ clk_gate_timer2_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 10, 0);
+
+static const struct clk_hw *clk_gate_timer3_parent[] = {
+ &clk_div_timer3.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER3, clk_gate_timer3,
+ clk_gate_timer3_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 11, 0);
+
+static const struct clk_hw *clk_gate_timer4_parent[] = {
+ &clk_div_timer4.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER4, clk_gate_timer4,
+ clk_gate_timer4_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 12, 0);
+
+static const struct clk_hw *clk_gate_timer5_parent[] = {
+ &clk_div_timer5.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER5, clk_gate_timer5,
+ clk_gate_timer5_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 13, 0);
+
+static const struct clk_hw *clk_gate_timer6_parent[] = {
+ &clk_div_timer6.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER6, clk_gate_timer6,
+ clk_gate_timer6_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 14, 0);
+
+static const struct clk_hw *clk_gate_timer7_parent[] = {
+ &clk_div_timer7.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER7, clk_gate_timer7,
+ clk_gate_timer7_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 15, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_CXP_CFG, clk_gate_cxp_cfg,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 15, 0);
+
+static const struct clk_hw *clk_gate_cxp_mac_parent[] = {
+ &clk_mux_cxp_mac.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_CXP_MAC, clk_gate_cxp_mac,
+ clk_gate_cxp_mac_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 14, 0);
+
+static const struct clk_hw *clk_gate_cxp_test_phy_parent[] = {
+ &clk_div_cxp_test_phy.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_CXP_TEST_PHY, clk_gate_cxp_test_phy,
+ clk_gate_cxp_test_phy_parent,
+ CLK_SET_RATE_PARENT,
+ 0x000, 6, 0);
+
+static const struct clk_hw *clk_gate_cxp_test_eth_phy_parent[] = {
+ &clk_div_cxp_test_eth_phy.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_CXP_TEST_ETH_PHY, clk_gate_cxp_test_eth_phy,
+ clk_gate_cxp_test_eth_phy_parent,
+ CLK_SET_RATE_PARENT,
+ 0x000, 7, 0);
+
+static const struct clk_hw *clk_gate_pcie_1g_parent[] = {
+ &clk_div_pcie_1g.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_PCIE_1G, clk_gate_pcie_1g,
+ clk_gate_pcie_1g_parent,
+ CLK_SET_RATE_PARENT,
+ 0x008, 15, 0);
+
+static const struct clk_hw *clk_gate_c2c0_test_phy_parent[] = {
+ &clk_div_c2c0_test_phy.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_C2C0_TEST_PHY, clk_gate_c2c0_test_phy,
+ clk_gate_c2c0_test_phy_parent,
+ CLK_SET_RATE_PARENT,
+ 0x000, 4, 0);
+
+static const struct clk_hw *clk_gate_c2c1_test_phy_parent[] = {
+ &clk_div_c2c1_test_phy.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_C2C1_TEST_PHY, clk_gate_c2c1_test_phy,
+ clk_gate_c2c1_test_phy_parent,
+ CLK_SET_RATE_PARENT,
+ 0x000, 5, 0);
+
+static const struct clk_hw *clk_gate_uart_500m_parent[] = {
+ &clk_div_uart_500m.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_UART_500M, clk_gate_uart_500m,
+ clk_gate_uart_500m_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 1, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_UART, clk_gate_apb_uart,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 2, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_SPI, clk_gate_apb_spi,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 22, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_AHB_SPIFMC, clk_gate_ahb_spifmc,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 5, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_I2C, clk_gate_apb_i2c,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x004, 23, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_AXI_DBG_I2C, clk_gate_axi_dbg_i2c,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 3, 0);
+
+static const struct clk_hw *clk_gate_gpio_db_parent[] = {
+ &clk_div_gpio_db.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_GPIO_DB, clk_gate_gpio_db,
+ clk_gate_gpio_db_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x004, 21, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_GPIO_INTR, clk_gate_apb_gpio_intr,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x004, 20, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_GPIO, clk_gate_apb_gpio,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x004, 19, 0);
+
+static const struct clk_hw *clk_gate_sd_parent[] = {
+ &clk_div_sd.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_SD, clk_gate_sd,
+ clk_gate_sd_parent,
+ CLK_SET_RATE_PARENT,
+ 0x008, 3, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_AXI_SD, clk_gate_axi_sd,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT,
+ 0x008, 2, 0);
+
+static const struct clk_hw *clk_gate_sd_100k_parent[] = {
+ &clk_div_sd_100k.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_SD_100K, clk_gate_sd_100k,
+ clk_gate_sd_100k_parent,
+ CLK_SET_RATE_PARENT,
+ 0x008, 4, 0);
+
+static const struct clk_hw *clk_gate_emmc_parent[] = {
+ &clk_div_emmc.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_EMMC, clk_gate_emmc,
+ clk_gate_emmc_parent,
+ CLK_SET_RATE_PARENT,
+ 0x008, 0, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_AXI_EMMC, clk_gate_axi_emmc,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 31, 0);
+
+static const struct clk_hw *clk_gate_emmc_100k_parent[] = {
+ &clk_div_emmc_100k.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_EMMC_100K, clk_gate_emmc_100k,
+ clk_gate_emmc_100k_parent,
+ CLK_SET_RATE_PARENT,
+ 0x008, 1, 0);
+
+static const struct clk_hw *clk_gate_efuse_parent[] = {
+ &clk_div_efuse.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_EFUSE, clk_gate_efuse,
+ clk_gate_efuse_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 17, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_EFUSE, clk_gate_apb_efuse,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 18, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_SYSDMA_AXI, clk_gate_sysdma_axi,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 0, 0);
+
+static const struct clk_hw *clk_gate_tx_eth0_parent[] = {
+ &clk_div_tx_eth0.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TX_ETH0, clk_gate_tx_eth0,
+ clk_gate_tx_eth0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 27, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_AXI_ETH0, clk_gate_axi_eth0,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 28, 0);
+
+static const struct clk_hw *clk_gate_ptp_ref_i_eth0_parent[] = {
+ &clk_div_ptp_ref_i_eth0.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_PTP_REF_I_ETH0, clk_gate_ptp_ref_i_eth0,
+ clk_gate_ptp_ref_i_eth0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 29, 0);
+
+static const struct clk_hw *clk_gate_ref_eth0_parent[] = {
+ &clk_div_ref_eth0.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_REF_ETH0, clk_gate_ref_eth0,
+ clk_gate_ref_eth0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 30, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_RTC, clk_gate_apb_rtc,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x004, 26, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_PWM, clk_gate_apb_pwm,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 25, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_WDT, clk_gate_apb_wdt,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 24, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_AXI_SRAM, clk_gate_axi_sram,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x004, 6, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_AHB_ROM, clk_gate_ahb_rom,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x004, 4, 0);
+
+static const struct clk_hw *clk_gate_pka_parent[] = {
+ &clk_div_pka.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_PKA, clk_gate_pka,
+ clk_gate_pka_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 16, 0);
+
+static struct sg2044_clk_common * const sg2044_div_commons[] = {
+ &clk_div_ap_sys_fixed.common,
+ &clk_div_ap_sys_main.common,
+ &clk_div_rp_sys_fixed.common,
+ &clk_div_rp_sys_main.common,
+ &clk_div_tpu_sys_fixed.common,
+ &clk_div_tpu_sys_main.common,
+ &clk_div_noc_sys_fixed.common,
+ &clk_div_noc_sys_main.common,
+ &clk_div_vc_src0_fixed.common,
+ &clk_div_vc_src0_main.common,
+ &clk_div_vc_src1_fixed.common,
+ &clk_div_vc_src1_main.common,
+ &clk_div_cxp_mac_fixed.common,
+ &clk_div_cxp_mac_main.common,
+ &clk_div_ddr0_fixed.common,
+ &clk_div_ddr0_main.common,
+ &clk_div_ddr1_fixed.common,
+ &clk_div_ddr1_main.common,
+ &clk_div_ddr2_fixed.common,
+ &clk_div_ddr2_main.common,
+ &clk_div_ddr3_fixed.common,
+ &clk_div_ddr3_main.common,
+ &clk_div_ddr4_fixed.common,
+ &clk_div_ddr4_main.common,
+ &clk_div_ddr5_fixed.common,
+ &clk_div_ddr5_main.common,
+ &clk_div_ddr6_fixed.common,
+ &clk_div_ddr6_main.common,
+ &clk_div_ddr7_fixed.common,
+ &clk_div_ddr7_main.common,
+ &clk_div_top_50m.common,
+ &clk_div_top_axi0.common,
+ &clk_div_top_axi_hsperi.common,
+ &clk_div_timer0.common,
+ &clk_div_timer1.common,
+ &clk_div_timer2.common,
+ &clk_div_timer3.common,
+ &clk_div_timer4.common,
+ &clk_div_timer5.common,
+ &clk_div_timer6.common,
+ &clk_div_timer7.common,
+ &clk_div_cxp_test_phy.common,
+ &clk_div_cxp_test_eth_phy.common,
+ &clk_div_c2c0_test_phy.common,
+ &clk_div_c2c1_test_phy.common,
+ &clk_div_pcie_1g.common,
+ &clk_div_uart_500m.common,
+ &clk_div_gpio_db.common,
+ &clk_div_sd.common,
+ &clk_div_sd_100k.common,
+ &clk_div_emmc.common,
+ &clk_div_emmc_100k.common,
+ &clk_div_efuse.common,
+ &clk_div_tx_eth0.common,
+ &clk_div_ptp_ref_i_eth0.common,
+ &clk_div_ref_eth0.common,
+ &clk_div_pka.common,
+};
+
+static struct sg2044_clk_common * const sg2044_mux_commons[] = {
+ &clk_mux_ddr0.common,
+ &clk_mux_ddr1.common,
+ &clk_mux_ddr2.common,
+ &clk_mux_ddr3.common,
+ &clk_mux_ddr4.common,
+ &clk_mux_ddr5.common,
+ &clk_mux_ddr6.common,
+ &clk_mux_ddr7.common,
+ &clk_mux_noc_sys.common,
+ &clk_mux_tpu_sys.common,
+ &clk_mux_rp_sys.common,
+ &clk_mux_ap_sys.common,
+ &clk_mux_vc_src0.common,
+ &clk_mux_vc_src1.common,
+ &clk_mux_cxp_mac.common,
+};
+
+static struct sg2044_clk_common * const sg2044_gate_commons[] = {
+ &clk_gate_ap_sys.common,
+ &clk_gate_rp_sys.common,
+ &clk_gate_tpu_sys.common,
+ &clk_gate_noc_sys.common,
+ &clk_gate_vc_src0.common,
+ &clk_gate_vc_src1.common,
+ &clk_gate_ddr0.common,
+ &clk_gate_ddr1.common,
+ &clk_gate_ddr2.common,
+ &clk_gate_ddr3.common,
+ &clk_gate_ddr4.common,
+ &clk_gate_ddr5.common,
+ &clk_gate_ddr6.common,
+ &clk_gate_ddr7.common,
+ &clk_gate_top_50m.common,
+ &clk_gate_sc_rx.common,
+ &clk_gate_sc_rx_x0y1.common,
+ &clk_gate_top_axi0.common,
+ &clk_gate_intc0.common,
+ &clk_gate_intc1.common,
+ &clk_gate_intc2.common,
+ &clk_gate_intc3.common,
+ &clk_gate_mailbox0.common,
+ &clk_gate_mailbox1.common,
+ &clk_gate_mailbox2.common,
+ &clk_gate_mailbox3.common,
+ &clk_gate_top_axi_hsperi.common,
+ &clk_gate_apb_timer.common,
+ &clk_gate_timer0.common,
+ &clk_gate_timer1.common,
+ &clk_gate_timer2.common,
+ &clk_gate_timer3.common,
+ &clk_gate_timer4.common,
+ &clk_gate_timer5.common,
+ &clk_gate_timer6.common,
+ &clk_gate_timer7.common,
+ &clk_gate_cxp_cfg.common,
+ &clk_gate_cxp_mac.common,
+ &clk_gate_cxp_test_phy.common,
+ &clk_gate_cxp_test_eth_phy.common,
+ &clk_gate_pcie_1g.common,
+ &clk_gate_c2c0_test_phy.common,
+ &clk_gate_c2c1_test_phy.common,
+ &clk_gate_uart_500m.common,
+ &clk_gate_apb_uart.common,
+ &clk_gate_apb_spi.common,
+ &clk_gate_ahb_spifmc.common,
+ &clk_gate_apb_i2c.common,
+ &clk_gate_axi_dbg_i2c.common,
+ &clk_gate_gpio_db.common,
+ &clk_gate_apb_gpio_intr.common,
+ &clk_gate_apb_gpio.common,
+ &clk_gate_sd.common,
+ &clk_gate_axi_sd.common,
+ &clk_gate_sd_100k.common,
+ &clk_gate_emmc.common,
+ &clk_gate_axi_emmc.common,
+ &clk_gate_emmc_100k.common,
+ &clk_gate_efuse.common,
+ &clk_gate_apb_efuse.common,
+ &clk_gate_sysdma_axi.common,
+ &clk_gate_tx_eth0.common,
+ &clk_gate_axi_eth0.common,
+ &clk_gate_ptp_ref_i_eth0.common,
+ &clk_gate_ref_eth0.common,
+ &clk_gate_apb_rtc.common,
+ &clk_gate_apb_pwm.common,
+ &clk_gate_apb_wdt.common,
+ &clk_gate_axi_sram.common,
+ &clk_gate_ahb_rom.common,
+ &clk_gate_pka.common,
+};
+
+static void sg2044_clk_fix_init_parent(struct clk_hw **pdata,
+ const struct clk_init_data *init,
+ struct clk_hw_onecell_data *data)
+{
+ u8 i;
+ const struct clk_hw *hw;
+ const struct sg2044_clk_common *common;
+
+ for (i = 0; i < init->num_parents; i++) {
+ hw = init->parent_hws[i];
+ common = hw_to_sg2044_clk_common(hw);
+
+ WARN(!data->hws[common->id], "clk %u is not registered\n",
+ common->id);
+ pdata[i] = data->hws[common->id];
+ }
+}
+
+static int sg2044_clk_init_ctrl(struct device *dev, void __iomem *reg,
+ struct sg2044_clk_ctrl *ctrl,
+ const struct sg2044_clk_desc_data *desc)
+{
+ int ret, i;
+ struct clk_hw *hw;
+
+ spin_lock_init(&ctrl->lock);
+
+ for (i = 0; i < desc->num_div; i++) {
+ struct sg2044_clk_common *common = desc->div[i];
+
+ common->lock = &ctrl->lock;
+ common->base = reg;
+
+ ret = devm_clk_hw_register(dev, &common->hw);
+ if (ret)
+ return ret;
+
+ ctrl->data.hws[common->id] = &common->hw;
+ }
+
+ for (i = 0; i < desc->num_mux; i++) {
+ struct sg2044_clk_common *common = desc->mux[i];
+ struct sg2044_mux *mux = hw_to_sg2044_mux(&common->hw);
+ const struct clk_init_data *init = common->hw.init;
+
+ common->lock = &ctrl->lock;
+ common->base = reg;
+
+ hw = devm_clk_hw_register_mux_parent_data_table(dev,
+ init->name,
+ init->parent_data,
+ init->num_parents,
+ init->flags,
+ reg + mux->mux.offset,
+ mux->mux.shift,
+ 1,
+ mux->mux.flags,
+ mux->mux.table,
+ &ctrl->lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ if (!(mux->mux.flags & CLK_MUX_READ_ONLY)) {
+ mux->nb.notifier_call = sg2044_mux_notifier_cb;
+ ret = devm_clk_notifier_register(dev, hw->clk,
+ &mux->nb);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "%s: failed to register notifier\n",
+ clk_hw_get_name(hw));
+ }
+
+ ctrl->data.hws[common->id] = hw;
+ }
+
+ for (i = 0; i < desc->num_gate; i++) {
+ struct sg2044_clk_common *common = desc->gate[i];
+ struct sg2044_gate *gate = hw_to_sg2044_gate(&common->hw);
+ const struct clk_init_data *init = common->hw.init;
+ struct clk_hw *parent_hws[1] = { };
+
+ sg2044_clk_fix_init_parent(parent_hws, init, &ctrl->data);
+ common->lock = &ctrl->lock;
+ common->base = reg;
+
+ hw = devm_clk_hw_register_gate_parent_hw(dev, init->name,
+ parent_hws[0],
+ init->flags,
+ reg + gate->gate.offset,
+ gate->gate.shift,
+ gate->gate.flags,
+ &ctrl->lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ ctrl->data.hws[common->id] = hw;
+ }
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ &ctrl->data);
+}
+
+static int sg2044_clk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sg2044_clk_ctrl *ctrl;
+ const struct sg2044_clk_desc_data *desc;
+ void __iomem *reg;
+ u32 num_clks;
+
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ desc = device_get_match_data(dev);
+ if (!desc)
+ return dev_err_probe(dev, -EINVAL, "no match data for platform\n");
+
+ num_clks = desc->num_div + desc->num_gate + desc->num_mux;
+
+ ctrl = devm_kzalloc(dev, struct_size(ctrl, data.hws, num_clks), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ ctrl->data.num = num_clks;
+
+ return sg2044_clk_init_ctrl(dev, reg, ctrl, desc);
+}
+
+static const struct sg2044_clk_desc_data sg2044_clk_desc_data = {
+ .div = sg2044_div_commons,
+ .mux = sg2044_mux_commons,
+ .gate = sg2044_gate_commons,
+ .num_div = ARRAY_SIZE(sg2044_div_commons),
+ .num_mux = ARRAY_SIZE(sg2044_mux_commons),
+ .num_gate = ARRAY_SIZE(sg2044_gate_commons),
+};
+
+static const struct of_device_id sg2044_clk_match[] = {
+ { .compatible = "sophgo,sg2044-clk", .data = &sg2044_clk_desc_data },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sg2044_clk_match);
+
+static struct platform_driver sg2044_clk_driver = {
+ .probe = sg2044_clk_probe,
+ .driver = {
+ .name = "sg2044-clk",
+ .of_match_table = sg2044_clk_match,
+ },
+};
+module_platform_driver(sg2044_clk_driver);
+
+MODULE_AUTHOR("Inochi Amaoto <inochiama@gmail.com>");
+MODULE_DESCRIPTION("Sophgo SG2044 clock driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/spacemit/Kconfig b/drivers/clk/spacemit/Kconfig
new file mode 100644
index 000000000000..4c4df845b3cb
--- /dev/null
+++ b/drivers/clk/spacemit/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config SPACEMIT_CCU
+ tristate "Clock support for SpacemiT SoCs"
+ depends on ARCH_SPACEMIT || COMPILE_TEST
+ select MFD_SYSCON
+ help
+ Say Y to enable clock controller unit support for SpacemiT SoCs.
+
+if SPACEMIT_CCU
+
+config SPACEMIT_K1_CCU
+ tristate "Support for SpacemiT K1 SoC"
+ depends on ARCH_SPACEMIT || COMPILE_TEST
+ help
+ Support for the clock controller unit in the SpacemiT K1 SoC.
+
+endif
diff --git a/drivers/clk/spacemit/Makefile b/drivers/clk/spacemit/Makefile
new file mode 100644
index 000000000000..5ec6da61db98
--- /dev/null
+++ b/drivers/clk/spacemit/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_SPACEMIT_K1_CCU) = spacemit-ccu-k1.o
+spacemit-ccu-k1-y = ccu_pll.o ccu_mix.o ccu_ddn.o
+spacemit-ccu-k1-y += ccu-k1.o
diff --git a/drivers/clk/spacemit/ccu-k1.c b/drivers/clk/spacemit/ccu-k1.c
new file mode 100644
index 000000000000..cdde37a05235
--- /dev/null
+++ b/drivers/clk/spacemit/ccu-k1.c
@@ -0,0 +1,1164 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ */
+
+#include <linux/array_size.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "ccu_common.h"
+#include "ccu_pll.h"
+#include "ccu_mix.h"
+#include "ccu_ddn.h"
+
+#include <dt-bindings/clock/spacemit,k1-syscon.h>
+
+/* APBS register offset */
+#define APBS_PLL1_SWCR1 0x100
+#define APBS_PLL1_SWCR2 0x104
+#define APBS_PLL1_SWCR3 0x108
+#define APBS_PLL2_SWCR1 0x118
+#define APBS_PLL2_SWCR2 0x11c
+#define APBS_PLL2_SWCR3 0x120
+#define APBS_PLL3_SWCR1 0x124
+#define APBS_PLL3_SWCR2 0x128
+#define APBS_PLL3_SWCR3 0x12c
+
+/* MPMU register offset */
+#define MPMU_POSR 0x0010
+#define POSR_PLL1_LOCK BIT(27)
+#define POSR_PLL2_LOCK BIT(28)
+#define POSR_PLL3_LOCK BIT(29)
+#define MPMU_SUCCR 0x0014
+#define MPMU_ISCCR 0x0044
+#define MPMU_WDTPCR 0x0200
+#define MPMU_RIPCCR 0x0210
+#define MPMU_ACGR 0x1024
+#define MPMU_APBCSCR 0x1050
+#define MPMU_SUCCR_1 0x10b0
+
+/* APBC register offset */
+#define APBC_UART1_CLK_RST 0x00
+#define APBC_UART2_CLK_RST 0x04
+#define APBC_GPIO_CLK_RST 0x08
+#define APBC_PWM0_CLK_RST 0x0c
+#define APBC_PWM1_CLK_RST 0x10
+#define APBC_PWM2_CLK_RST 0x14
+#define APBC_PWM3_CLK_RST 0x18
+#define APBC_TWSI8_CLK_RST 0x20
+#define APBC_UART3_CLK_RST 0x24
+#define APBC_RTC_CLK_RST 0x28
+#define APBC_TWSI0_CLK_RST 0x2c
+#define APBC_TWSI1_CLK_RST 0x30
+#define APBC_TIMERS1_CLK_RST 0x34
+#define APBC_TWSI2_CLK_RST 0x38
+#define APBC_AIB_CLK_RST 0x3c
+#define APBC_TWSI4_CLK_RST 0x40
+#define APBC_TIMERS2_CLK_RST 0x44
+#define APBC_ONEWIRE_CLK_RST 0x48
+#define APBC_TWSI5_CLK_RST 0x4c
+#define APBC_DRO_CLK_RST 0x58
+#define APBC_IR_CLK_RST 0x5c
+#define APBC_TWSI6_CLK_RST 0x60
+#define APBC_COUNTER_CLK_SEL 0x64
+#define APBC_TWSI7_CLK_RST 0x68
+#define APBC_TSEN_CLK_RST 0x6c
+#define APBC_UART4_CLK_RST 0x70
+#define APBC_UART5_CLK_RST 0x74
+#define APBC_UART6_CLK_RST 0x78
+#define APBC_SSP3_CLK_RST 0x7c
+#define APBC_SSPA0_CLK_RST 0x80
+#define APBC_SSPA1_CLK_RST 0x84
+#define APBC_IPC_AP2AUD_CLK_RST 0x90
+#define APBC_UART7_CLK_RST 0x94
+#define APBC_UART8_CLK_RST 0x98
+#define APBC_UART9_CLK_RST 0x9c
+#define APBC_CAN0_CLK_RST 0xa0
+#define APBC_PWM4_CLK_RST 0xa8
+#define APBC_PWM5_CLK_RST 0xac
+#define APBC_PWM6_CLK_RST 0xb0
+#define APBC_PWM7_CLK_RST 0xb4
+#define APBC_PWM8_CLK_RST 0xb8
+#define APBC_PWM9_CLK_RST 0xbc
+#define APBC_PWM10_CLK_RST 0xc0
+#define APBC_PWM11_CLK_RST 0xc4
+#define APBC_PWM12_CLK_RST 0xc8
+#define APBC_PWM13_CLK_RST 0xcc
+#define APBC_PWM14_CLK_RST 0xd0
+#define APBC_PWM15_CLK_RST 0xd4
+#define APBC_PWM16_CLK_RST 0xd8
+#define APBC_PWM17_CLK_RST 0xdc
+#define APBC_PWM18_CLK_RST 0xe0
+#define APBC_PWM19_CLK_RST 0xe4
+
+/* APMU register offset */
+#define APMU_JPG_CLK_RES_CTRL 0x020
+#define APMU_CSI_CCIC2_CLK_RES_CTRL 0x024
+#define APMU_ISP_CLK_RES_CTRL 0x038
+#define APMU_LCD_CLK_RES_CTRL1 0x044
+#define APMU_LCD_SPI_CLK_RES_CTRL 0x048
+#define APMU_LCD_CLK_RES_CTRL2 0x04c
+#define APMU_CCIC_CLK_RES_CTRL 0x050
+#define APMU_SDH0_CLK_RES_CTRL 0x054
+#define APMU_SDH1_CLK_RES_CTRL 0x058
+#define APMU_USB_CLK_RES_CTRL 0x05c
+#define APMU_QSPI_CLK_RES_CTRL 0x060
+#define APMU_DMA_CLK_RES_CTRL 0x064
+#define APMU_AES_CLK_RES_CTRL 0x068
+#define APMU_VPU_CLK_RES_CTRL 0x0a4
+#define APMU_GPU_CLK_RES_CTRL 0x0cc
+#define APMU_SDH2_CLK_RES_CTRL 0x0e0
+#define APMU_PMUA_MC_CTRL 0x0e8
+#define APMU_PMU_CC2_AP 0x100
+#define APMU_PMUA_EM_CLK_RES_CTRL 0x104
+#define APMU_AUDIO_CLK_RES_CTRL 0x14c
+#define APMU_HDMI_CLK_RES_CTRL 0x1b8
+#define APMU_CCI550_CLK_CTRL 0x300
+#define APMU_ACLK_CLK_CTRL 0x388
+#define APMU_CPU_C0_CLK_CTRL 0x38C
+#define APMU_CPU_C1_CLK_CTRL 0x390
+#define APMU_PCIE_CLK_RES_CTRL_0 0x3cc
+#define APMU_PCIE_CLK_RES_CTRL_1 0x3d4
+#define APMU_PCIE_CLK_RES_CTRL_2 0x3dc
+#define APMU_EMAC0_CLK_RES_CTRL 0x3e4
+#define APMU_EMAC1_CLK_RES_CTRL 0x3ec
+
+struct spacemit_ccu_data {
+ struct clk_hw **hws;
+ size_t num;
+};
+
+/* APBS clocks start. The APBS region contains all PLL clocks and nothing else. */
+
+/*
+ * PLL{1,2} must run at fixed frequencies to provide clocks at the correct
+ * rates for peripherals.
+ */
+static const struct ccu_pll_rate_tbl pll1_rate_tbl[] = {
+ CCU_PLL_RATE(2457600000UL, 0x0050dd64, 0x330ccccd),
+};
+
+static const struct ccu_pll_rate_tbl pll2_rate_tbl[] = {
+ CCU_PLL_RATE(3000000000UL, 0x0050dd66, 0x3fe00000),
+};
+
+static const struct ccu_pll_rate_tbl pll3_rate_tbl[] = {
+ CCU_PLL_RATE(1600000000UL, 0x0050cd61, 0x43eaaaab),
+ CCU_PLL_RATE(1800000000UL, 0x0050cd61, 0x4b000000),
+ CCU_PLL_RATE(2000000000UL, 0x0050dd62, 0x2aeaaaab),
+ CCU_PLL_RATE(2457600000UL, 0x0050dd64, 0x330ccccd),
+ CCU_PLL_RATE(3000000000UL, 0x0050dd66, 0x3fe00000),
+ CCU_PLL_RATE(3200000000UL, 0x0050dd67, 0x43eaaaab),
+};
+
+CCU_PLL_DEFINE(pll1, pll1_rate_tbl, APBS_PLL1_SWCR1, APBS_PLL1_SWCR3, MPMU_POSR, POSR_PLL1_LOCK,
+ CLK_SET_RATE_GATE);
+CCU_PLL_DEFINE(pll2, pll2_rate_tbl, APBS_PLL2_SWCR1, APBS_PLL2_SWCR3, MPMU_POSR, POSR_PLL2_LOCK,
+ CLK_SET_RATE_GATE);
+CCU_PLL_DEFINE(pll3, pll3_rate_tbl, APBS_PLL3_SWCR1, APBS_PLL3_SWCR3, MPMU_POSR, POSR_PLL3_LOCK,
+ CLK_SET_RATE_GATE);
+
+CCU_FACTOR_GATE_DEFINE(pll1_d2, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(1), 2, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d3, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(2), 3, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d4, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(3), 4, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d5, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(4), 5, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d6, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(5), 6, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d7, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(6), 7, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d8, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(7), 8, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d11_223p4, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(15), 11, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d13_189, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(16), 13, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d23_106p8, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(20), 23, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d64_38p4, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(0), 64, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_aud_245p7, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(10), 10, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_aud_24p5, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(11), 100, 1);
+
+CCU_FACTOR_GATE_DEFINE(pll2_d1, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(0), 1, 1);
+CCU_FACTOR_GATE_DEFINE(pll2_d2, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(1), 2, 1);
+CCU_FACTOR_GATE_DEFINE(pll2_d3, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(2), 3, 1);
+CCU_FACTOR_GATE_DEFINE(pll2_d4, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(3), 4, 1);
+CCU_FACTOR_GATE_DEFINE(pll2_d5, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(4), 5, 1);
+CCU_FACTOR_GATE_DEFINE(pll2_d6, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(5), 6, 1);
+CCU_FACTOR_GATE_DEFINE(pll2_d7, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(6), 7, 1);
+CCU_FACTOR_GATE_DEFINE(pll2_d8, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(7), 8, 1);
+
+CCU_FACTOR_GATE_DEFINE(pll3_d1, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(0), 1, 1);
+CCU_FACTOR_GATE_DEFINE(pll3_d2, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(1), 2, 1);
+CCU_FACTOR_GATE_DEFINE(pll3_d3, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(2), 3, 1);
+CCU_FACTOR_GATE_DEFINE(pll3_d4, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(3), 4, 1);
+CCU_FACTOR_GATE_DEFINE(pll3_d5, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(4), 5, 1);
+CCU_FACTOR_GATE_DEFINE(pll3_d6, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(5), 6, 1);
+CCU_FACTOR_GATE_DEFINE(pll3_d7, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(6), 7, 1);
+CCU_FACTOR_GATE_DEFINE(pll3_d8, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(7), 8, 1);
+
+CCU_FACTOR_DEFINE(pll3_20, CCU_PARENT_HW(pll3_d8), 20, 1);
+CCU_FACTOR_DEFINE(pll3_40, CCU_PARENT_HW(pll3_d8), 10, 1);
+CCU_FACTOR_DEFINE(pll3_80, CCU_PARENT_HW(pll3_d8), 5, 1);
+
+/* APBS clocks end */
+
+/* MPMU clocks start */
+CCU_GATE_DEFINE(pll1_d8_307p2, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(13), 0);
+
+CCU_FACTOR_DEFINE(pll1_d32_76p8, CCU_PARENT_HW(pll1_d8_307p2), 4, 1);
+
+CCU_FACTOR_DEFINE(pll1_d40_61p44, CCU_PARENT_HW(pll1_d8_307p2), 5, 1);
+
+CCU_FACTOR_DEFINE(pll1_d16_153p6, CCU_PARENT_HW(pll1_d8), 2, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d24_102p4, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(12), 3, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d48_51p2, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(7), 6, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d48_51p2_ap, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(11), 6, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_m3d128_57p6, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(8), 16, 3);
+CCU_FACTOR_GATE_DEFINE(pll1_d96_25p6, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(4), 12, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d192_12p8, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(3), 24, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d192_12p8_wdt, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(19), 24, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d384_6p4, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(2), 48, 1);
+
+CCU_FACTOR_DEFINE(pll1_d768_3p2, CCU_PARENT_HW(pll1_d384_6p4), 2, 1);
+CCU_FACTOR_DEFINE(pll1_d1536_1p6, CCU_PARENT_HW(pll1_d384_6p4), 4, 1);
+CCU_FACTOR_DEFINE(pll1_d3072_0p8, CCU_PARENT_HW(pll1_d384_6p4), 8, 1);
+
+CCU_GATE_DEFINE(pll1_d6_409p6, CCU_PARENT_HW(pll1_d6), MPMU_ACGR, BIT(0), 0);
+CCU_FACTOR_GATE_DEFINE(pll1_d12_204p8, CCU_PARENT_HW(pll1_d6), MPMU_ACGR, BIT(5), 2, 1);
+
+CCU_GATE_DEFINE(pll1_d5_491p52, CCU_PARENT_HW(pll1_d5), MPMU_ACGR, BIT(21), 0);
+CCU_FACTOR_GATE_DEFINE(pll1_d10_245p76, CCU_PARENT_HW(pll1_d5), MPMU_ACGR, BIT(18), 2, 1);
+
+CCU_GATE_DEFINE(pll1_d4_614p4, CCU_PARENT_HW(pll1_d4), MPMU_ACGR, BIT(15), 0);
+CCU_FACTOR_GATE_DEFINE(pll1_d52_47p26, CCU_PARENT_HW(pll1_d4), MPMU_ACGR, BIT(10), 13, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d78_31p5, CCU_PARENT_HW(pll1_d4), MPMU_ACGR, BIT(6), 39, 2);
+
+CCU_GATE_DEFINE(pll1_d3_819p2, CCU_PARENT_HW(pll1_d3), MPMU_ACGR, BIT(14), 0);
+
+CCU_GATE_DEFINE(pll1_d2_1228p8, CCU_PARENT_HW(pll1_d2), MPMU_ACGR, BIT(16), 0);
+
+CCU_GATE_DEFINE(slow_uart, CCU_PARENT_NAME(osc), MPMU_ACGR, BIT(1), CLK_IGNORE_UNUSED);
+CCU_DDN_DEFINE(slow_uart1_14p74, pll1_d16_153p6, MPMU_SUCCR, 16, 13, 0, 13, 0);
+CCU_DDN_DEFINE(slow_uart2_48, pll1_d4_614p4, MPMU_SUCCR_1, 16, 13, 0, 13, 0);
+
+CCU_GATE_DEFINE(wdt_clk, CCU_PARENT_HW(pll1_d96_25p6), MPMU_WDTPCR, BIT(1), 0);
+
+CCU_FACTOR_GATE_DEFINE(i2s_sysclk, CCU_PARENT_HW(pll1_d16_153p6), MPMU_ISCCR, BIT(31), 50, 1);
+CCU_FACTOR_GATE_DEFINE(i2s_bclk, CCU_PARENT_HW(i2s_sysclk), MPMU_ISCCR, BIT(29), 1, 1);
+
+static const struct clk_parent_data apb_parents[] = {
+ CCU_PARENT_HW(pll1_d96_25p6),
+ CCU_PARENT_HW(pll1_d48_51p2),
+ CCU_PARENT_HW(pll1_d96_25p6),
+ CCU_PARENT_HW(pll1_d24_102p4),
+};
+CCU_MUX_DEFINE(apb_clk, apb_parents, MPMU_APBCSCR, 0, 2, 0);
+
+CCU_GATE_DEFINE(wdt_bus_clk, CCU_PARENT_HW(apb_clk), MPMU_WDTPCR, BIT(0), 0);
+
+CCU_GATE_DEFINE(ripc_clk, CCU_PARENT_HW(apb_clk), MPMU_RIPCCR, 0x1, 0);
+/* MPMU clocks end */
+
+/* APBC clocks start */
+static const struct clk_parent_data uart_clk_parents[] = {
+ CCU_PARENT_HW(pll1_m3d128_57p6),
+ CCU_PARENT_HW(slow_uart1_14p74),
+ CCU_PARENT_HW(slow_uart2_48),
+};
+CCU_MUX_GATE_DEFINE(uart0_clk, uart_clk_parents, APBC_UART1_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart2_clk, uart_clk_parents, APBC_UART2_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart3_clk, uart_clk_parents, APBC_UART3_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart4_clk, uart_clk_parents, APBC_UART4_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart5_clk, uart_clk_parents, APBC_UART5_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart6_clk, uart_clk_parents, APBC_UART6_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart7_clk, uart_clk_parents, APBC_UART7_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart8_clk, uart_clk_parents, APBC_UART8_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart9_clk, uart_clk_parents, APBC_UART9_CLK_RST, 4, 3, BIT(1), 0);
+
+CCU_GATE_DEFINE(gpio_clk, CCU_PARENT_NAME(vctcxo_24m), APBC_GPIO_CLK_RST, BIT(1), 0);
+
+static const struct clk_parent_data pwm_parents[] = {
+ CCU_PARENT_HW(pll1_d192_12p8),
+ CCU_PARENT_NAME(osc),
+};
+CCU_MUX_GATE_DEFINE(pwm0_clk, pwm_parents, APBC_PWM0_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm1_clk, pwm_parents, APBC_PWM1_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm2_clk, pwm_parents, APBC_PWM2_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm3_clk, pwm_parents, APBC_PWM3_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm4_clk, pwm_parents, APBC_PWM4_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm5_clk, pwm_parents, APBC_PWM5_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm6_clk, pwm_parents, APBC_PWM6_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm7_clk, pwm_parents, APBC_PWM7_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm8_clk, pwm_parents, APBC_PWM8_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm9_clk, pwm_parents, APBC_PWM9_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm10_clk, pwm_parents, APBC_PWM10_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm11_clk, pwm_parents, APBC_PWM11_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm12_clk, pwm_parents, APBC_PWM12_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm13_clk, pwm_parents, APBC_PWM13_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm14_clk, pwm_parents, APBC_PWM14_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm15_clk, pwm_parents, APBC_PWM15_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm16_clk, pwm_parents, APBC_PWM16_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm17_clk, pwm_parents, APBC_PWM17_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm18_clk, pwm_parents, APBC_PWM18_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm19_clk, pwm_parents, APBC_PWM19_CLK_RST, 4, 3, BIT(1), 0);
+
+static const struct clk_parent_data ssp_parents[] = {
+ CCU_PARENT_HW(pll1_d384_6p4),
+ CCU_PARENT_HW(pll1_d192_12p8),
+ CCU_PARENT_HW(pll1_d96_25p6),
+ CCU_PARENT_HW(pll1_d48_51p2),
+ CCU_PARENT_HW(pll1_d768_3p2),
+ CCU_PARENT_HW(pll1_d1536_1p6),
+ CCU_PARENT_HW(pll1_d3072_0p8),
+};
+CCU_MUX_GATE_DEFINE(ssp3_clk, ssp_parents, APBC_SSP3_CLK_RST, 4, 3, BIT(1), 0);
+
+CCU_GATE_DEFINE(rtc_clk, CCU_PARENT_NAME(osc), APBC_RTC_CLK_RST,
+ BIT(7) | BIT(1), 0);
+
+static const struct clk_parent_data twsi_parents[] = {
+ CCU_PARENT_HW(pll1_d78_31p5),
+ CCU_PARENT_HW(pll1_d48_51p2),
+ CCU_PARENT_HW(pll1_d40_61p44),
+};
+CCU_MUX_GATE_DEFINE(twsi0_clk, twsi_parents, APBC_TWSI0_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(twsi1_clk, twsi_parents, APBC_TWSI1_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(twsi2_clk, twsi_parents, APBC_TWSI2_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(twsi4_clk, twsi_parents, APBC_TWSI4_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(twsi5_clk, twsi_parents, APBC_TWSI5_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(twsi6_clk, twsi_parents, APBC_TWSI6_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(twsi7_clk, twsi_parents, APBC_TWSI7_CLK_RST, 4, 3, BIT(1), 0);
+/*
+ * APBC_TWSI8_CLK_RST has a quirk: reads of the register always return zero.
+ * Combine the functional and bus bits into a single gate to avoid sharing
+ * this write-only register between different clock hardware instances.
+ */
+CCU_GATE_DEFINE(twsi8_clk, CCU_PARENT_HW(pll1_d78_31p5), APBC_TWSI8_CLK_RST, BIT(1) | BIT(0), 0);
+
+static const struct clk_parent_data timer_parents[] = {
+ CCU_PARENT_HW(pll1_d192_12p8),
+ CCU_PARENT_NAME(osc),
+ CCU_PARENT_HW(pll1_d384_6p4),
+ CCU_PARENT_NAME(vctcxo_3m),
+ CCU_PARENT_NAME(vctcxo_1m),
+};
+CCU_MUX_GATE_DEFINE(timers1_clk, timer_parents, APBC_TIMERS1_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(timers2_clk, timer_parents, APBC_TIMERS2_CLK_RST, 4, 3, BIT(1), 0);
+
+CCU_GATE_DEFINE(aib_clk, CCU_PARENT_NAME(vctcxo_24m), APBC_AIB_CLK_RST, BIT(1), 0);
+
+CCU_GATE_DEFINE(onewire_clk, CCU_PARENT_NAME(vctcxo_24m), APBC_ONEWIRE_CLK_RST, BIT(1), 0);
+
+static const struct clk_parent_data sspa_parents[] = {
+ CCU_PARENT_HW(pll1_d384_6p4),
+ CCU_PARENT_HW(pll1_d192_12p8),
+ CCU_PARENT_HW(pll1_d96_25p6),
+ CCU_PARENT_HW(pll1_d48_51p2),
+ CCU_PARENT_HW(pll1_d768_3p2),
+ CCU_PARENT_HW(pll1_d1536_1p6),
+ CCU_PARENT_HW(pll1_d3072_0p8),
+ CCU_PARENT_HW(i2s_bclk),
+};
+CCU_MUX_GATE_DEFINE(sspa0_clk, sspa_parents, APBC_SSPA0_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(sspa1_clk, sspa_parents, APBC_SSPA1_CLK_RST, 4, 3, BIT(1), 0);
+CCU_GATE_DEFINE(dro_clk, CCU_PARENT_HW(apb_clk), APBC_DRO_CLK_RST, BIT(1), 0);
+CCU_GATE_DEFINE(ir_clk, CCU_PARENT_HW(apb_clk), APBC_IR_CLK_RST, BIT(1), 0);
+CCU_GATE_DEFINE(tsen_clk, CCU_PARENT_HW(apb_clk), APBC_TSEN_CLK_RST, BIT(1), 0);
+CCU_GATE_DEFINE(ipc_ap2aud_clk, CCU_PARENT_HW(apb_clk), APBC_IPC_AP2AUD_CLK_RST, BIT(1), 0);
+
+static const struct clk_parent_data can_parents[] = {
+ CCU_PARENT_HW(pll3_20),
+ CCU_PARENT_HW(pll3_40),
+ CCU_PARENT_HW(pll3_80),
+};
+CCU_MUX_GATE_DEFINE(can0_clk, can_parents, APBC_CAN0_CLK_RST, 4, 3, BIT(1), 0);
+CCU_GATE_DEFINE(can0_bus_clk, CCU_PARENT_NAME(vctcxo_24m), APBC_CAN0_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(uart0_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART1_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart2_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART2_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart3_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART3_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart4_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART4_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart5_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART5_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart6_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART6_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart7_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART7_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart8_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART8_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart9_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART9_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(gpio_bus_clk, CCU_PARENT_HW(apb_clk), APBC_GPIO_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(pwm0_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM0_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm1_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM1_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm2_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM2_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm3_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM3_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm4_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM4_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm5_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM5_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm6_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM6_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm7_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM7_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm8_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM8_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm9_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM9_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm10_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM10_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm11_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM11_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm12_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM12_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm13_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM13_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm14_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM14_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm15_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM15_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm16_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM16_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm17_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM17_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm18_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM18_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm19_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM19_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(ssp3_bus_clk, CCU_PARENT_HW(apb_clk), APBC_SSP3_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(rtc_bus_clk, CCU_PARENT_HW(apb_clk), APBC_RTC_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(twsi0_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI0_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(twsi1_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI1_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(twsi2_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI2_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(twsi4_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI4_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(twsi5_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI5_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(twsi6_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI6_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(twsi7_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI7_CLK_RST, BIT(0), 0);
+/* Placeholder to work around the quirk of the register */
+CCU_FACTOR_DEFINE(twsi8_bus_clk, CCU_PARENT_HW(apb_clk), 1, 1);
+
+CCU_GATE_DEFINE(timers1_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TIMERS1_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(timers2_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TIMERS2_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(aib_bus_clk, CCU_PARENT_HW(apb_clk), APBC_AIB_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(onewire_bus_clk, CCU_PARENT_HW(apb_clk), APBC_ONEWIRE_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(sspa0_bus_clk, CCU_PARENT_HW(apb_clk), APBC_SSPA0_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(sspa1_bus_clk, CCU_PARENT_HW(apb_clk), APBC_SSPA1_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(tsen_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TSEN_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(ipc_ap2aud_bus_clk, CCU_PARENT_HW(apb_clk), APBC_IPC_AP2AUD_CLK_RST, BIT(0), 0);
+/* APBC clocks end */
+
+/* APMU clocks start */
+static const struct clk_parent_data pmua_aclk_parents[] = {
+ CCU_PARENT_HW(pll1_d10_245p76),
+ CCU_PARENT_HW(pll1_d8_307p2),
+};
+CCU_MUX_DIV_FC_DEFINE(pmua_aclk, pmua_aclk_parents, APMU_ACLK_CLK_CTRL, 1, 2, BIT(4), 0, 1, 0);
+
+static const struct clk_parent_data cci550_clk_parents[] = {
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll2_d3),
+};
+CCU_MUX_DIV_FC_DEFINE(cci550_clk, cci550_clk_parents, APMU_CCI550_CLK_CTRL, 8, 3, BIT(12), 0, 2,
+ CLK_IS_CRITICAL);
+
+static const struct clk_parent_data cpu_c0_hi_clk_parents[] = {
+ CCU_PARENT_HW(pll3_d2),
+ CCU_PARENT_HW(pll3_d1),
+};
+CCU_MUX_DEFINE(cpu_c0_hi_clk, cpu_c0_hi_clk_parents, APMU_CPU_C0_CLK_CTRL, 13, 1, 0);
+static const struct clk_parent_data cpu_c0_clk_parents[] = {
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d2_1228p8),
+ CCU_PARENT_HW(pll3_d3),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(cpu_c0_hi_clk),
+};
+CCU_MUX_FC_DEFINE(cpu_c0_core_clk, cpu_c0_clk_parents, APMU_CPU_C0_CLK_CTRL, BIT(12), 0, 3,
+ CLK_IS_CRITICAL);
+CCU_DIV_DEFINE(cpu_c0_ace_clk, CCU_PARENT_HW(cpu_c0_core_clk), APMU_CPU_C0_CLK_CTRL, 6, 3,
+ CLK_IS_CRITICAL);
+CCU_DIV_DEFINE(cpu_c0_tcm_clk, CCU_PARENT_HW(cpu_c0_core_clk), APMU_CPU_C0_CLK_CTRL, 9, 3,
+ CLK_IS_CRITICAL);
+
+static const struct clk_parent_data cpu_c1_hi_clk_parents[] = {
+ CCU_PARENT_HW(pll3_d2),
+ CCU_PARENT_HW(pll3_d1),
+};
+CCU_MUX_DEFINE(cpu_c1_hi_clk, cpu_c1_hi_clk_parents, APMU_CPU_C1_CLK_CTRL, 13, 1, 0);
+static const struct clk_parent_data cpu_c1_clk_parents[] = {
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d2_1228p8),
+ CCU_PARENT_HW(pll3_d3),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(cpu_c1_hi_clk),
+};
+CCU_MUX_FC_DEFINE(cpu_c1_core_clk, cpu_c1_clk_parents, APMU_CPU_C1_CLK_CTRL, BIT(12), 0, 3,
+ CLK_IS_CRITICAL);
+CCU_DIV_DEFINE(cpu_c1_ace_clk, CCU_PARENT_HW(cpu_c1_core_clk), APMU_CPU_C1_CLK_CTRL, 6, 3,
+ CLK_IS_CRITICAL);
+
+static const struct clk_parent_data jpg_parents[] = {
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll1_d2_1228p8),
+ CCU_PARENT_HW(pll2_d4),
+ CCU_PARENT_HW(pll2_d3),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(jpg_clk, jpg_parents, APMU_JPG_CLK_RES_CTRL, 5, 3, BIT(15), 2, 3,
+ BIT(1), 0);
+
+static const struct clk_parent_data ccic2phy_parents[] = {
+ CCU_PARENT_HW(pll1_d24_102p4),
+ CCU_PARENT_HW(pll1_d48_51p2_ap),
+};
+CCU_MUX_GATE_DEFINE(ccic2phy_clk, ccic2phy_parents, APMU_CSI_CCIC2_CLK_RES_CTRL, 7, 1, BIT(5), 0);
+
+static const struct clk_parent_data ccic3phy_parents[] = {
+ CCU_PARENT_HW(pll1_d24_102p4),
+ CCU_PARENT_HW(pll1_d48_51p2_ap),
+};
+CCU_MUX_GATE_DEFINE(ccic3phy_clk, ccic3phy_parents, APMU_CSI_CCIC2_CLK_RES_CTRL, 31, 1, BIT(30), 0);
+
+static const struct clk_parent_data csi_parents[] = {
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll2_d2),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(pll2_d4),
+ CCU_PARENT_HW(pll1_d2_1228p8),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(csi_clk, csi_parents, APMU_CSI_CCIC2_CLK_RES_CTRL, 20, 3, BIT(15),
+ 16, 3, BIT(4), 0);
+
+static const struct clk_parent_data camm_parents[] = {
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll2_d5),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_NAME(vctcxo_24m),
+};
+CCU_MUX_DIV_GATE_DEFINE(camm0_clk, camm_parents, APMU_CSI_CCIC2_CLK_RES_CTRL, 23, 4, 8, 2,
+ BIT(28), 0);
+CCU_MUX_DIV_GATE_DEFINE(camm1_clk, camm_parents, APMU_CSI_CCIC2_CLK_RES_CTRL, 23, 4, 8, 2,
+ BIT(6), 0);
+CCU_MUX_DIV_GATE_DEFINE(camm2_clk, camm_parents, APMU_CSI_CCIC2_CLK_RES_CTRL, 23, 4, 8, 2,
+ BIT(3), 0);
+
+static const struct clk_parent_data isp_cpp_parents[] = {
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll1_d6_409p6),
+};
+CCU_MUX_DIV_GATE_DEFINE(isp_cpp_clk, isp_cpp_parents, APMU_ISP_CLK_RES_CTRL, 24, 2, 26, 1,
+ BIT(28), 0);
+static const struct clk_parent_data isp_bus_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll1_d10_245p76),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(isp_bus_clk, isp_bus_parents, APMU_ISP_CLK_RES_CTRL, 18, 3, BIT(23),
+ 21, 2, BIT(17), 0);
+static const struct clk_parent_data isp_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d8_307p2),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(isp_clk, isp_parents, APMU_ISP_CLK_RES_CTRL, 4, 3, BIT(7), 8, 2,
+ BIT(1), 0);
+
+static const struct clk_parent_data dpumclk_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d8_307p2),
+};
+CCU_MUX_DIV_GATE_SPLIT_FC_DEFINE(dpu_mclk, dpumclk_parents, APMU_LCD_CLK_RES_CTRL2,
+ APMU_LCD_CLK_RES_CTRL1, 1, 4, BIT(29), 5, 3, BIT(0), 0);
+
+static const struct clk_parent_data dpuesc_parents[] = {
+ CCU_PARENT_HW(pll1_d48_51p2_ap),
+ CCU_PARENT_HW(pll1_d52_47p26),
+ CCU_PARENT_HW(pll1_d96_25p6),
+ CCU_PARENT_HW(pll1_d32_76p8),
+};
+CCU_MUX_GATE_DEFINE(dpu_esc_clk, dpuesc_parents, APMU_LCD_CLK_RES_CTRL1, 0, 2, BIT(2), 0);
+
+static const struct clk_parent_data dpubit_parents[] = {
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll2_d2),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(pll1_d2_1228p8),
+ CCU_PARENT_HW(pll2_d4),
+ CCU_PARENT_HW(pll2_d5),
+ CCU_PARENT_HW(pll2_d7),
+ CCU_PARENT_HW(pll2_d8),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(dpu_bit_clk, dpubit_parents, APMU_LCD_CLK_RES_CTRL1, 17, 3, BIT(31),
+ 20, 3, BIT(16), 0);
+
+static const struct clk_parent_data dpupx_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll2_d7),
+ CCU_PARENT_HW(pll2_d8),
+};
+CCU_MUX_DIV_GATE_SPLIT_FC_DEFINE(dpu_pxclk, dpupx_parents, APMU_LCD_CLK_RES_CTRL2,
+ APMU_LCD_CLK_RES_CTRL1, 17, 4, BIT(30), 21, 3, BIT(16), 0);
+
+CCU_GATE_DEFINE(dpu_hclk, CCU_PARENT_HW(pmua_aclk), APMU_LCD_CLK_RES_CTRL1,
+ BIT(5), 0);
+
+static const struct clk_parent_data dpu_spi_parents[] = {
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d10_245p76),
+ CCU_PARENT_HW(pll1_d11_223p4),
+ CCU_PARENT_HW(pll1_d13_189),
+ CCU_PARENT_HW(pll1_d23_106p8),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(pll2_d5),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(dpu_spi_clk, dpu_spi_parents, APMU_LCD_SPI_CLK_RES_CTRL, 8, 3,
+ BIT(7), 12, 3, BIT(1), 0);
+CCU_GATE_DEFINE(dpu_spi_hbus_clk, CCU_PARENT_HW(pmua_aclk), APMU_LCD_SPI_CLK_RES_CTRL, BIT(3), 0);
+CCU_GATE_DEFINE(dpu_spi_bus_clk, CCU_PARENT_HW(pmua_aclk), APMU_LCD_SPI_CLK_RES_CTRL, BIT(5), 0);
+CCU_GATE_DEFINE(dpu_spi_aclk, CCU_PARENT_HW(pmua_aclk), APMU_LCD_SPI_CLK_RES_CTRL, BIT(6), 0);
+
+static const struct clk_parent_data v2d_parents[] = {
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll1_d4_614p4),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(v2d_clk, v2d_parents, APMU_LCD_CLK_RES_CTRL1, 9, 3, BIT(28), 12, 2,
+ BIT(8), 0);
+
+static const struct clk_parent_data ccic_4x_parents[] = {
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll2_d2),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(pll2_d4),
+ CCU_PARENT_HW(pll1_d2_1228p8),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(ccic_4x_clk, ccic_4x_parents, APMU_CCIC_CLK_RES_CTRL, 18, 3,
+ BIT(15), 23, 2, BIT(4), 0);
+
+static const struct clk_parent_data ccic1phy_parents[] = {
+ CCU_PARENT_HW(pll1_d24_102p4),
+ CCU_PARENT_HW(pll1_d48_51p2_ap),
+};
+CCU_MUX_GATE_DEFINE(ccic1phy_clk, ccic1phy_parents, APMU_CCIC_CLK_RES_CTRL, 7, 1, BIT(5), 0);
+
+CCU_GATE_DEFINE(sdh_axi_aclk, CCU_PARENT_HW(pmua_aclk), APMU_SDH0_CLK_RES_CTRL, BIT(3), 0);
+static const struct clk_parent_data sdh01_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll2_d8),
+ CCU_PARENT_HW(pll2_d5),
+ CCU_PARENT_HW(pll1_d11_223p4),
+ CCU_PARENT_HW(pll1_d13_189),
+ CCU_PARENT_HW(pll1_d23_106p8),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(sdh0_clk, sdh01_parents, APMU_SDH0_CLK_RES_CTRL, 8, 3, BIT(11), 5, 3,
+ BIT(4), 0);
+CCU_MUX_DIV_GATE_FC_DEFINE(sdh1_clk, sdh01_parents, APMU_SDH1_CLK_RES_CTRL, 8, 3, BIT(11), 5, 3,
+ BIT(4), 0);
+static const struct clk_parent_data sdh2_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll2_d8),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll1_d11_223p4),
+ CCU_PARENT_HW(pll1_d13_189),
+ CCU_PARENT_HW(pll1_d23_106p8),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(sdh2_clk, sdh2_parents, APMU_SDH2_CLK_RES_CTRL, 8, 3, BIT(11), 5, 3,
+ BIT(4), 0);
+
+CCU_GATE_DEFINE(usb_axi_clk, CCU_PARENT_HW(pmua_aclk), APMU_USB_CLK_RES_CTRL, BIT(1), 0);
+CCU_GATE_DEFINE(usb_p1_aclk, CCU_PARENT_HW(pmua_aclk), APMU_USB_CLK_RES_CTRL, BIT(5), 0);
+CCU_GATE_DEFINE(usb30_clk, CCU_PARENT_HW(pmua_aclk), APMU_USB_CLK_RES_CTRL, BIT(8), 0);
+
+static const struct clk_parent_data qspi_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll2_d8),
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll1_d10_245p76),
+ CCU_PARENT_HW(pll1_d11_223p4),
+ CCU_PARENT_HW(pll1_d23_106p8),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d13_189),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(qspi_clk, qspi_parents, APMU_QSPI_CLK_RES_CTRL, 9, 3, BIT(12), 6, 3,
+ BIT(4), 0);
+CCU_GATE_DEFINE(qspi_bus_clk, CCU_PARENT_HW(pmua_aclk), APMU_QSPI_CLK_RES_CTRL, BIT(3), 0);
+CCU_GATE_DEFINE(dma_clk, CCU_PARENT_HW(pmua_aclk), APMU_DMA_CLK_RES_CTRL, BIT(3), 0);
+
+static const struct clk_parent_data aes_parents[] = {
+ CCU_PARENT_HW(pll1_d12_204p8),
+ CCU_PARENT_HW(pll1_d24_102p4),
+};
+CCU_MUX_GATE_DEFINE(aes_clk, aes_parents, APMU_AES_CLK_RES_CTRL, 6, 1, BIT(5), 0);
+
+static const struct clk_parent_data vpu_parents[] = {
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll3_d6),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(pll2_d4),
+ CCU_PARENT_HW(pll2_d5),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(vpu_clk, vpu_parents, APMU_VPU_CLK_RES_CTRL, 13, 3, BIT(21), 10, 3,
+ BIT(3), 0);
+
+static const struct clk_parent_data gpu_parents[] = {
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll3_d6),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(pll2_d4),
+ CCU_PARENT_HW(pll2_d5),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(gpu_clk, gpu_parents, APMU_GPU_CLK_RES_CTRL, 12, 3, BIT(15), 18, 3,
+ BIT(4), 0);
+
+static const struct clk_parent_data emmc_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d52_47p26),
+ CCU_PARENT_HW(pll1_d3_819p2),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(emmc_clk, emmc_parents, APMU_PMUA_EM_CLK_RES_CTRL, 8, 3, BIT(11),
+ 6, 2, BIT(4), 0);
+CCU_DIV_GATE_DEFINE(emmc_x_clk, CCU_PARENT_HW(pll1_d2_1228p8), APMU_PMUA_EM_CLK_RES_CTRL, 12,
+ 3, BIT(15), 0);
+
+static const struct clk_parent_data audio_parents[] = {
+ CCU_PARENT_HW(pll1_aud_245p7),
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll1_d6_409p6),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(audio_clk, audio_parents, APMU_AUDIO_CLK_RES_CTRL, 4, 3, BIT(15),
+ 7, 3, BIT(12), 0);
+
+static const struct clk_parent_data hdmi_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d8_307p2),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(hdmi_mclk, hdmi_parents, APMU_HDMI_CLK_RES_CTRL, 1, 4, BIT(29), 5,
+ 3, BIT(0), 0);
+
+CCU_GATE_DEFINE(pcie0_master_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_0, BIT(2), 0);
+CCU_GATE_DEFINE(pcie0_slave_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_0, BIT(1), 0);
+CCU_GATE_DEFINE(pcie0_dbi_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_0, BIT(0), 0);
+
+CCU_GATE_DEFINE(pcie1_master_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_1, BIT(2), 0);
+CCU_GATE_DEFINE(pcie1_slave_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_1, BIT(1), 0);
+CCU_GATE_DEFINE(pcie1_dbi_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_1, BIT(0), 0);
+
+CCU_GATE_DEFINE(pcie2_master_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_2, BIT(2), 0);
+CCU_GATE_DEFINE(pcie2_slave_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_2, BIT(1), 0);
+CCU_GATE_DEFINE(pcie2_dbi_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_2, BIT(0), 0);
+
+CCU_GATE_DEFINE(emac0_bus_clk, CCU_PARENT_HW(pmua_aclk), APMU_EMAC0_CLK_RES_CTRL, BIT(0), 0);
+CCU_GATE_DEFINE(emac0_ptp_clk, CCU_PARENT_HW(pll2_d6), APMU_EMAC0_CLK_RES_CTRL, BIT(15), 0);
+CCU_GATE_DEFINE(emac1_bus_clk, CCU_PARENT_HW(pmua_aclk), APMU_EMAC1_CLK_RES_CTRL, BIT(0), 0);
+CCU_GATE_DEFINE(emac1_ptp_clk, CCU_PARENT_HW(pll2_d6), APMU_EMAC1_CLK_RES_CTRL, BIT(15), 0);
+
+CCU_GATE_DEFINE(emmc_bus_clk, CCU_PARENT_HW(pmua_aclk), APMU_PMUA_EM_CLK_RES_CTRL, BIT(3), 0);
+/* APMU clocks end */
+
+static struct clk_hw *k1_ccu_pll_hws[] = {
+ [CLK_PLL1] = &pll1.common.hw,
+ [CLK_PLL2] = &pll2.common.hw,
+ [CLK_PLL3] = &pll3.common.hw,
+ [CLK_PLL1_D2] = &pll1_d2.common.hw,
+ [CLK_PLL1_D3] = &pll1_d3.common.hw,
+ [CLK_PLL1_D4] = &pll1_d4.common.hw,
+ [CLK_PLL1_D5] = &pll1_d5.common.hw,
+ [CLK_PLL1_D6] = &pll1_d6.common.hw,
+ [CLK_PLL1_D7] = &pll1_d7.common.hw,
+ [CLK_PLL1_D8] = &pll1_d8.common.hw,
+ [CLK_PLL1_D11] = &pll1_d11_223p4.common.hw,
+ [CLK_PLL1_D13] = &pll1_d13_189.common.hw,
+ [CLK_PLL1_D23] = &pll1_d23_106p8.common.hw,
+ [CLK_PLL1_D64] = &pll1_d64_38p4.common.hw,
+ [CLK_PLL1_D10_AUD] = &pll1_aud_245p7.common.hw,
+ [CLK_PLL1_D100_AUD] = &pll1_aud_24p5.common.hw,
+ [CLK_PLL2_D1] = &pll2_d1.common.hw,
+ [CLK_PLL2_D2] = &pll2_d2.common.hw,
+ [CLK_PLL2_D3] = &pll2_d3.common.hw,
+ [CLK_PLL2_D4] = &pll2_d4.common.hw,
+ [CLK_PLL2_D5] = &pll2_d5.common.hw,
+ [CLK_PLL2_D6] = &pll2_d6.common.hw,
+ [CLK_PLL2_D7] = &pll2_d7.common.hw,
+ [CLK_PLL2_D8] = &pll2_d8.common.hw,
+ [CLK_PLL3_D1] = &pll3_d1.common.hw,
+ [CLK_PLL3_D2] = &pll3_d2.common.hw,
+ [CLK_PLL3_D3] = &pll3_d3.common.hw,
+ [CLK_PLL3_D4] = &pll3_d4.common.hw,
+ [CLK_PLL3_D5] = &pll3_d5.common.hw,
+ [CLK_PLL3_D6] = &pll3_d6.common.hw,
+ [CLK_PLL3_D7] = &pll3_d7.common.hw,
+ [CLK_PLL3_D8] = &pll3_d8.common.hw,
+ [CLK_PLL3_80] = &pll3_80.common.hw,
+ [CLK_PLL3_40] = &pll3_40.common.hw,
+ [CLK_PLL3_20] = &pll3_20.common.hw,
+};
+
+static const struct spacemit_ccu_data k1_ccu_pll_data = {
+ .hws = k1_ccu_pll_hws,
+ .num = ARRAY_SIZE(k1_ccu_pll_hws),
+};
+
+static struct clk_hw *k1_ccu_mpmu_hws[] = {
+ [CLK_PLL1_307P2] = &pll1_d8_307p2.common.hw,
+ [CLK_PLL1_76P8] = &pll1_d32_76p8.common.hw,
+ [CLK_PLL1_61P44] = &pll1_d40_61p44.common.hw,
+ [CLK_PLL1_153P6] = &pll1_d16_153p6.common.hw,
+ [CLK_PLL1_102P4] = &pll1_d24_102p4.common.hw,
+ [CLK_PLL1_51P2] = &pll1_d48_51p2.common.hw,
+ [CLK_PLL1_51P2_AP] = &pll1_d48_51p2_ap.common.hw,
+ [CLK_PLL1_57P6] = &pll1_m3d128_57p6.common.hw,
+ [CLK_PLL1_25P6] = &pll1_d96_25p6.common.hw,
+ [CLK_PLL1_12P8] = &pll1_d192_12p8.common.hw,
+ [CLK_PLL1_12P8_WDT] = &pll1_d192_12p8_wdt.common.hw,
+ [CLK_PLL1_6P4] = &pll1_d384_6p4.common.hw,
+ [CLK_PLL1_3P2] = &pll1_d768_3p2.common.hw,
+ [CLK_PLL1_1P6] = &pll1_d1536_1p6.common.hw,
+ [CLK_PLL1_0P8] = &pll1_d3072_0p8.common.hw,
+ [CLK_PLL1_409P6] = &pll1_d6_409p6.common.hw,
+ [CLK_PLL1_204P8] = &pll1_d12_204p8.common.hw,
+ [CLK_PLL1_491] = &pll1_d5_491p52.common.hw,
+ [CLK_PLL1_245P76] = &pll1_d10_245p76.common.hw,
+ [CLK_PLL1_614] = &pll1_d4_614p4.common.hw,
+ [CLK_PLL1_47P26] = &pll1_d52_47p26.common.hw,
+ [CLK_PLL1_31P5] = &pll1_d78_31p5.common.hw,
+ [CLK_PLL1_819] = &pll1_d3_819p2.common.hw,
+ [CLK_PLL1_1228] = &pll1_d2_1228p8.common.hw,
+ [CLK_SLOW_UART] = &slow_uart.common.hw,
+ [CLK_SLOW_UART1] = &slow_uart1_14p74.common.hw,
+ [CLK_SLOW_UART2] = &slow_uart2_48.common.hw,
+ [CLK_WDT] = &wdt_clk.common.hw,
+ [CLK_RIPC] = &ripc_clk.common.hw,
+ [CLK_I2S_SYSCLK] = &i2s_sysclk.common.hw,
+ [CLK_I2S_BCLK] = &i2s_bclk.common.hw,
+ [CLK_APB] = &apb_clk.common.hw,
+ [CLK_WDT_BUS] = &wdt_bus_clk.common.hw,
+};
+
+static const struct spacemit_ccu_data k1_ccu_mpmu_data = {
+ .hws = k1_ccu_mpmu_hws,
+ .num = ARRAY_SIZE(k1_ccu_mpmu_hws),
+};
+
+static struct clk_hw *k1_ccu_apbc_hws[] = {
+ [CLK_UART0] = &uart0_clk.common.hw,
+ [CLK_UART2] = &uart2_clk.common.hw,
+ [CLK_UART3] = &uart3_clk.common.hw,
+ [CLK_UART4] = &uart4_clk.common.hw,
+ [CLK_UART5] = &uart5_clk.common.hw,
+ [CLK_UART6] = &uart6_clk.common.hw,
+ [CLK_UART7] = &uart7_clk.common.hw,
+ [CLK_UART8] = &uart8_clk.common.hw,
+ [CLK_UART9] = &uart9_clk.common.hw,
+ [CLK_GPIO] = &gpio_clk.common.hw,
+ [CLK_PWM0] = &pwm0_clk.common.hw,
+ [CLK_PWM1] = &pwm1_clk.common.hw,
+ [CLK_PWM2] = &pwm2_clk.common.hw,
+ [CLK_PWM3] = &pwm3_clk.common.hw,
+ [CLK_PWM4] = &pwm4_clk.common.hw,
+ [CLK_PWM5] = &pwm5_clk.common.hw,
+ [CLK_PWM6] = &pwm6_clk.common.hw,
+ [CLK_PWM7] = &pwm7_clk.common.hw,
+ [CLK_PWM8] = &pwm8_clk.common.hw,
+ [CLK_PWM9] = &pwm9_clk.common.hw,
+ [CLK_PWM10] = &pwm10_clk.common.hw,
+ [CLK_PWM11] = &pwm11_clk.common.hw,
+ [CLK_PWM12] = &pwm12_clk.common.hw,
+ [CLK_PWM13] = &pwm13_clk.common.hw,
+ [CLK_PWM14] = &pwm14_clk.common.hw,
+ [CLK_PWM15] = &pwm15_clk.common.hw,
+ [CLK_PWM16] = &pwm16_clk.common.hw,
+ [CLK_PWM17] = &pwm17_clk.common.hw,
+ [CLK_PWM18] = &pwm18_clk.common.hw,
+ [CLK_PWM19] = &pwm19_clk.common.hw,
+ [CLK_SSP3] = &ssp3_clk.common.hw,
+ [CLK_RTC] = &rtc_clk.common.hw,
+ [CLK_TWSI0] = &twsi0_clk.common.hw,
+ [CLK_TWSI1] = &twsi1_clk.common.hw,
+ [CLK_TWSI2] = &twsi2_clk.common.hw,
+ [CLK_TWSI4] = &twsi4_clk.common.hw,
+ [CLK_TWSI5] = &twsi5_clk.common.hw,
+ [CLK_TWSI6] = &twsi6_clk.common.hw,
+ [CLK_TWSI7] = &twsi7_clk.common.hw,
+ [CLK_TWSI8] = &twsi8_clk.common.hw,
+ [CLK_TIMERS1] = &timers1_clk.common.hw,
+ [CLK_TIMERS2] = &timers2_clk.common.hw,
+ [CLK_AIB] = &aib_clk.common.hw,
+ [CLK_ONEWIRE] = &onewire_clk.common.hw,
+ [CLK_SSPA0] = &sspa0_clk.common.hw,
+ [CLK_SSPA1] = &sspa1_clk.common.hw,
+ [CLK_DRO] = &dro_clk.common.hw,
+ [CLK_IR] = &ir_clk.common.hw,
+ [CLK_TSEN] = &tsen_clk.common.hw,
+ [CLK_IPC_AP2AUD] = &ipc_ap2aud_clk.common.hw,
+ [CLK_CAN0] = &can0_clk.common.hw,
+ [CLK_CAN0_BUS] = &can0_bus_clk.common.hw,
+ [CLK_UART0_BUS] = &uart0_bus_clk.common.hw,
+ [CLK_UART2_BUS] = &uart2_bus_clk.common.hw,
+ [CLK_UART3_BUS] = &uart3_bus_clk.common.hw,
+ [CLK_UART4_BUS] = &uart4_bus_clk.common.hw,
+ [CLK_UART5_BUS] = &uart5_bus_clk.common.hw,
+ [CLK_UART6_BUS] = &uart6_bus_clk.common.hw,
+ [CLK_UART7_BUS] = &uart7_bus_clk.common.hw,
+ [CLK_UART8_BUS] = &uart8_bus_clk.common.hw,
+ [CLK_UART9_BUS] = &uart9_bus_clk.common.hw,
+ [CLK_GPIO_BUS] = &gpio_bus_clk.common.hw,
+ [CLK_PWM0_BUS] = &pwm0_bus_clk.common.hw,
+ [CLK_PWM1_BUS] = &pwm1_bus_clk.common.hw,
+ [CLK_PWM2_BUS] = &pwm2_bus_clk.common.hw,
+ [CLK_PWM3_BUS] = &pwm3_bus_clk.common.hw,
+ [CLK_PWM4_BUS] = &pwm4_bus_clk.common.hw,
+ [CLK_PWM5_BUS] = &pwm5_bus_clk.common.hw,
+ [CLK_PWM6_BUS] = &pwm6_bus_clk.common.hw,
+ [CLK_PWM7_BUS] = &pwm7_bus_clk.common.hw,
+ [CLK_PWM8_BUS] = &pwm8_bus_clk.common.hw,
+ [CLK_PWM9_BUS] = &pwm9_bus_clk.common.hw,
+ [CLK_PWM10_BUS] = &pwm10_bus_clk.common.hw,
+ [CLK_PWM11_BUS] = &pwm11_bus_clk.common.hw,
+ [CLK_PWM12_BUS] = &pwm12_bus_clk.common.hw,
+ [CLK_PWM13_BUS] = &pwm13_bus_clk.common.hw,
+ [CLK_PWM14_BUS] = &pwm14_bus_clk.common.hw,
+ [CLK_PWM15_BUS] = &pwm15_bus_clk.common.hw,
+ [CLK_PWM16_BUS] = &pwm16_bus_clk.common.hw,
+ [CLK_PWM17_BUS] = &pwm17_bus_clk.common.hw,
+ [CLK_PWM18_BUS] = &pwm18_bus_clk.common.hw,
+ [CLK_PWM19_BUS] = &pwm19_bus_clk.common.hw,
+ [CLK_SSP3_BUS] = &ssp3_bus_clk.common.hw,
+ [CLK_RTC_BUS] = &rtc_bus_clk.common.hw,
+ [CLK_TWSI0_BUS] = &twsi0_bus_clk.common.hw,
+ [CLK_TWSI1_BUS] = &twsi1_bus_clk.common.hw,
+ [CLK_TWSI2_BUS] = &twsi2_bus_clk.common.hw,
+ [CLK_TWSI4_BUS] = &twsi4_bus_clk.common.hw,
+ [CLK_TWSI5_BUS] = &twsi5_bus_clk.common.hw,
+ [CLK_TWSI6_BUS] = &twsi6_bus_clk.common.hw,
+ [CLK_TWSI7_BUS] = &twsi7_bus_clk.common.hw,
+ [CLK_TWSI8_BUS] = &twsi8_bus_clk.common.hw,
+ [CLK_TIMERS1_BUS] = &timers1_bus_clk.common.hw,
+ [CLK_TIMERS2_BUS] = &timers2_bus_clk.common.hw,
+ [CLK_AIB_BUS] = &aib_bus_clk.common.hw,
+ [CLK_ONEWIRE_BUS] = &onewire_bus_clk.common.hw,
+ [CLK_SSPA0_BUS] = &sspa0_bus_clk.common.hw,
+ [CLK_SSPA1_BUS] = &sspa1_bus_clk.common.hw,
+ [CLK_TSEN_BUS] = &tsen_bus_clk.common.hw,
+ [CLK_IPC_AP2AUD_BUS] = &ipc_ap2aud_bus_clk.common.hw,
+};
+
+static const struct spacemit_ccu_data k1_ccu_apbc_data = {
+ .hws = k1_ccu_apbc_hws,
+ .num = ARRAY_SIZE(k1_ccu_apbc_hws),
+};
+
+static struct clk_hw *k1_ccu_apmu_hws[] = {
+ [CLK_CCI550] = &cci550_clk.common.hw,
+ [CLK_CPU_C0_HI] = &cpu_c0_hi_clk.common.hw,
+ [CLK_CPU_C0_CORE] = &cpu_c0_core_clk.common.hw,
+ [CLK_CPU_C0_ACE] = &cpu_c0_ace_clk.common.hw,
+ [CLK_CPU_C0_TCM] = &cpu_c0_tcm_clk.common.hw,
+ [CLK_CPU_C1_HI] = &cpu_c1_hi_clk.common.hw,
+ [CLK_CPU_C1_CORE] = &cpu_c1_core_clk.common.hw,
+ [CLK_CPU_C1_ACE] = &cpu_c1_ace_clk.common.hw,
+ [CLK_CCIC_4X] = &ccic_4x_clk.common.hw,
+ [CLK_CCIC1PHY] = &ccic1phy_clk.common.hw,
+ [CLK_SDH_AXI] = &sdh_axi_aclk.common.hw,
+ [CLK_SDH0] = &sdh0_clk.common.hw,
+ [CLK_SDH1] = &sdh1_clk.common.hw,
+ [CLK_SDH2] = &sdh2_clk.common.hw,
+ [CLK_USB_P1] = &usb_p1_aclk.common.hw,
+ [CLK_USB_AXI] = &usb_axi_clk.common.hw,
+ [CLK_USB30] = &usb30_clk.common.hw,
+ [CLK_QSPI] = &qspi_clk.common.hw,
+ [CLK_QSPI_BUS] = &qspi_bus_clk.common.hw,
+ [CLK_DMA] = &dma_clk.common.hw,
+ [CLK_AES] = &aes_clk.common.hw,
+ [CLK_VPU] = &vpu_clk.common.hw,
+ [CLK_GPU] = &gpu_clk.common.hw,
+ [CLK_EMMC] = &emmc_clk.common.hw,
+ [CLK_EMMC_X] = &emmc_x_clk.common.hw,
+ [CLK_AUDIO] = &audio_clk.common.hw,
+ [CLK_HDMI] = &hdmi_mclk.common.hw,
+ [CLK_PMUA_ACLK] = &pmua_aclk.common.hw,
+ [CLK_PCIE0_MASTER] = &pcie0_master_clk.common.hw,
+ [CLK_PCIE0_SLAVE] = &pcie0_slave_clk.common.hw,
+ [CLK_PCIE0_DBI] = &pcie0_dbi_clk.common.hw,
+ [CLK_PCIE1_MASTER] = &pcie1_master_clk.common.hw,
+ [CLK_PCIE1_SLAVE] = &pcie1_slave_clk.common.hw,
+ [CLK_PCIE1_DBI] = &pcie1_dbi_clk.common.hw,
+ [CLK_PCIE2_MASTER] = &pcie2_master_clk.common.hw,
+ [CLK_PCIE2_SLAVE] = &pcie2_slave_clk.common.hw,
+ [CLK_PCIE2_DBI] = &pcie2_dbi_clk.common.hw,
+ [CLK_EMAC0_BUS] = &emac0_bus_clk.common.hw,
+ [CLK_EMAC0_PTP] = &emac0_ptp_clk.common.hw,
+ [CLK_EMAC1_BUS] = &emac1_bus_clk.common.hw,
+ [CLK_EMAC1_PTP] = &emac1_ptp_clk.common.hw,
+ [CLK_JPG] = &jpg_clk.common.hw,
+ [CLK_CCIC2PHY] = &ccic2phy_clk.common.hw,
+ [CLK_CCIC3PHY] = &ccic3phy_clk.common.hw,
+ [CLK_CSI] = &csi_clk.common.hw,
+ [CLK_CAMM0] = &camm0_clk.common.hw,
+ [CLK_CAMM1] = &camm1_clk.common.hw,
+ [CLK_CAMM2] = &camm2_clk.common.hw,
+ [CLK_ISP_CPP] = &isp_cpp_clk.common.hw,
+ [CLK_ISP_BUS] = &isp_bus_clk.common.hw,
+ [CLK_ISP] = &isp_clk.common.hw,
+ [CLK_DPU_MCLK] = &dpu_mclk.common.hw,
+ [CLK_DPU_ESC] = &dpu_esc_clk.common.hw,
+ [CLK_DPU_BIT] = &dpu_bit_clk.common.hw,
+ [CLK_DPU_PXCLK] = &dpu_pxclk.common.hw,
+ [CLK_DPU_HCLK] = &dpu_hclk.common.hw,
+ [CLK_DPU_SPI] = &dpu_spi_clk.common.hw,
+ [CLK_DPU_SPI_HBUS] = &dpu_spi_hbus_clk.common.hw,
+ [CLK_DPU_SPIBUS] = &dpu_spi_bus_clk.common.hw,
+ [CLK_DPU_SPI_ACLK] = &dpu_spi_aclk.common.hw,
+ [CLK_V2D] = &v2d_clk.common.hw,
+ [CLK_EMMC_BUS] = &emmc_bus_clk.common.hw,
+};
+
+static const struct spacemit_ccu_data k1_ccu_apmu_data = {
+ .hws = k1_ccu_apmu_hws,
+ .num = ARRAY_SIZE(k1_ccu_apmu_hws),
+};
+
+static int spacemit_ccu_register(struct device *dev,
+ struct regmap *regmap,
+ struct regmap *lock_regmap,
+ const struct spacemit_ccu_data *data)
+{
+ struct clk_hw_onecell_data *clk_data;
+ int i, ret;
+
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, data->num),
+ GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ for (i = 0; i < data->num; i++) {
+ struct clk_hw *hw = data->hws[i];
+ struct ccu_common *common;
+ const char *name;
+
+ if (!hw) {
+ clk_data->hws[i] = ERR_PTR(-ENOENT);
+ continue;
+ }
+
+ name = hw->init->name;
+
+ common = hw_to_ccu_common(hw);
+ common->regmap = regmap;
+ common->lock_regmap = lock_regmap;
+
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret) {
+ dev_err(dev, "Cannot register clock %d - %s\n",
+ i, name);
+ return ret;
+ }
+
+ clk_data->hws[i] = hw;
+ }
+
+ clk_data->num = data->num;
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ dev_err(dev, "failed to add clock hardware provider (%d)\n", ret);
+
+ return ret;
+}
+
+static int k1_ccu_probe(struct platform_device *pdev)
+{
+ struct regmap *base_regmap, *lock_regmap = NULL;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ base_regmap = device_node_to_regmap(dev->of_node);
+ if (IS_ERR(base_regmap))
+ return dev_err_probe(dev, PTR_ERR(base_regmap),
+ "failed to get regmap\n");
+
+ /*
+	 * The lock status of the PLLs is located in the MPMU region, while the
+	 * PLLs themselves sit in the APBS region, so a reference to the MPMU
+	 * syscon is required to check whether a PLL has locked.
+ */
+ if (of_device_is_compatible(dev->of_node, "spacemit,k1-pll")) {
+ struct device_node *mpmu = of_parse_phandle(dev->of_node,
+ "spacemit,mpmu", 0);
+ if (!mpmu)
+ return dev_err_probe(dev, -ENODEV,
+ "Cannot parse MPMU region\n");
+
+ lock_regmap = device_node_to_regmap(mpmu);
+ of_node_put(mpmu);
+
+ if (IS_ERR(lock_regmap))
+ return dev_err_probe(dev, PTR_ERR(lock_regmap),
+ "failed to get lock regmap\n");
+ }
+
+ ret = spacemit_ccu_register(dev, base_regmap, lock_regmap,
+ of_device_get_match_data(dev));
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to register clocks\n");
+
+ return 0;
+}
+
+static const struct of_device_id of_k1_ccu_match[] = {
+ {
+ .compatible = "spacemit,k1-pll",
+ .data = &k1_ccu_pll_data,
+ },
+ {
+ .compatible = "spacemit,k1-syscon-mpmu",
+ .data = &k1_ccu_mpmu_data,
+ },
+ {
+ .compatible = "spacemit,k1-syscon-apbc",
+ .data = &k1_ccu_apbc_data,
+ },
+ {
+ .compatible = "spacemit,k1-syscon-apmu",
+ .data = &k1_ccu_apmu_data,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, of_k1_ccu_match);
+
+static struct platform_driver k1_ccu_driver = {
+ .driver = {
+ .name = "spacemit,k1-ccu",
+ .of_match_table = of_k1_ccu_match,
+ },
+ .probe = k1_ccu_probe,
+};
+module_platform_driver(k1_ccu_driver);
+
+MODULE_DESCRIPTION("SpacemiT K1 CCU driver");
+MODULE_AUTHOR("Haylen Chu <heylenay@4d2.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/spacemit/ccu_common.h b/drivers/clk/spacemit/ccu_common.h
new file mode 100644
index 000000000000..da72f3836e0b
--- /dev/null
+++ b/drivers/clk/spacemit/ccu_common.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ */
+
+#ifndef _CCU_COMMON_H_
+#define _CCU_COMMON_H_
+
+#include <linux/regmap.h>
+
+struct ccu_common {
+ struct regmap *regmap;
+ struct regmap *lock_regmap;
+
+ union {
+ /* For DDN and MIX */
+ struct {
+ u32 reg_ctrl;
+ u32 reg_fc;
+ u32 mask_fc;
+ };
+
+ /* For PLL */
+ struct {
+ u32 reg_swcr1;
+ u32 reg_swcr3;
+ };
+ };
+
+ struct clk_hw hw;
+};
+
+static inline struct ccu_common *hw_to_ccu_common(struct clk_hw *hw)
+{
+ return container_of(hw, struct ccu_common, hw);
+}
+
+#define ccu_read(c, reg) \
+ ({ \
+ u32 tmp; \
+ regmap_read((c)->regmap, (c)->reg_##reg, &tmp); \
+ tmp; \
+ })
+#define ccu_update(c, reg, mask, val) \
+ regmap_update_bits((c)->regmap, (c)->reg_##reg, mask, val)
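+
+/*
+ * Illustrative usage sketch (not part of the driver): for a clock type that
+ * embeds struct ccu_common (struct ccu_mix, for example), the "reg" argument
+ * is token-pasted onto "reg_", so
+ *
+ *	u32 val = ccu_read(&mix->common, ctrl);
+ *	ccu_update(&mix->common, ctrl, BIT(0), BIT(0));
+ *
+ * read and then set bit 0 of the register selected by .reg_ctrl.
+ */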
+
+#endif /* _CCU_COMMON_H_ */
diff --git a/drivers/clk/spacemit/ccu_ddn.c b/drivers/clk/spacemit/ccu_ddn.c
new file mode 100644
index 000000000000..be311b045698
--- /dev/null
+++ b/drivers/clk/spacemit/ccu_ddn.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ *
+ * DDN stands for "Divider Denominator Numerator"; it is an M/N clock with a
+ * constant x2 factor. The clock hardware follows the equation below,
+ *
+ * numerator Fin
+ * 2 * ------------- = -------
+ * denominator Fout
+ *
+ * Thus, Fout could be calculated with,
+ *
+ * Fin denominator
+ * Fout = ----- * -------------
+ * 2 numerator
+ */
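+
+/*
+ * Worked example (the numbers are illustrative only, not taken from any
+ * datasheet): with Fin = 614.4 MHz, numerator = 2 and denominator = 1, the
+ * equation above gives Fout = 614.4 MHz * 1 / (2 * 2) = 153.6 MHz.
+ */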
+
+#include <linux/clk-provider.h>
+#include <linux/rational.h>
+
+#include "ccu_ddn.h"
+
+static unsigned long ccu_ddn_calc_rate(unsigned long prate,
+ unsigned long num, unsigned long den)
+{
+ return prate * den / 2 / num;
+}
+
+static unsigned long ccu_ddn_calc_best_rate(struct ccu_ddn *ddn,
+ unsigned long rate, unsigned long prate,
+ unsigned long *num, unsigned long *den)
+{
+ rational_best_approximation(rate, prate / 2,
+ ddn->den_mask >> ddn->den_shift,
+ ddn->num_mask >> ddn->num_shift,
+ den, num);
+ return ccu_ddn_calc_rate(prate, *num, *den);
+}
+
+static long ccu_ddn_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct ccu_ddn *ddn = hw_to_ccu_ddn(hw);
+ unsigned long num, den;
+
+ return ccu_ddn_calc_best_rate(ddn, rate, *prate, &num, &den);
+}
+
+static unsigned long ccu_ddn_recalc_rate(struct clk_hw *hw, unsigned long prate)
+{
+ struct ccu_ddn *ddn = hw_to_ccu_ddn(hw);
+ unsigned int val, num, den;
+
+ val = ccu_read(&ddn->common, ctrl);
+
+ num = (val & ddn->num_mask) >> ddn->num_shift;
+ den = (val & ddn->den_mask) >> ddn->den_shift;
+
+ return ccu_ddn_calc_rate(prate, num, den);
+}
+
+static int ccu_ddn_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct ccu_ddn *ddn = hw_to_ccu_ddn(hw);
+ unsigned long num, den;
+
+ ccu_ddn_calc_best_rate(ddn, rate, prate, &num, &den);
+
+ ccu_update(&ddn->common, ctrl,
+ ddn->num_mask | ddn->den_mask,
+ (num << ddn->num_shift) | (den << ddn->den_shift));
+
+ return 0;
+}
+
+const struct clk_ops spacemit_ccu_ddn_ops = {
+ .recalc_rate = ccu_ddn_recalc_rate,
+ .round_rate = ccu_ddn_round_rate,
+ .set_rate = ccu_ddn_set_rate,
+};
diff --git a/drivers/clk/spacemit/ccu_ddn.h b/drivers/clk/spacemit/ccu_ddn.h
new file mode 100644
index 000000000000..a52fabe77d62
--- /dev/null
+++ b/drivers/clk/spacemit/ccu_ddn.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ */
+
+#ifndef _CCU_DDN_H_
+#define _CCU_DDN_H_
+
+#include <linux/bitops.h>
+#include <linux/clk-provider.h>
+
+#include "ccu_common.h"
+
+struct ccu_ddn {
+ struct ccu_common common;
+ unsigned int num_mask;
+ unsigned int num_shift;
+ unsigned int den_mask;
+ unsigned int den_shift;
+};
+
+#define CCU_DDN_INIT(_name, _parent, _flags) \
+ CLK_HW_INIT_HW(#_name, &_parent.common.hw, &spacemit_ccu_ddn_ops, _flags)
+
+#define CCU_DDN_DEFINE(_name, _parent, _reg_ctrl, _num_shift, _num_width, \
+ _den_shift, _den_width, _flags) \
+static struct ccu_ddn _name = { \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ .hw.init = CCU_DDN_INIT(_name, _parent, _flags), \
+ }, \
+ .num_mask = GENMASK(_num_shift + _num_width - 1, _num_shift), \
+ .num_shift = _num_shift, \
+ .den_mask = GENMASK(_den_shift + _den_width - 1, _den_shift), \
+ .den_shift = _den_shift, \
+}
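+
+/*
+ * Hypothetical example (the register offset, shifts and widths below are made
+ * up for illustration and do not describe real hardware):
+ *
+ *	CCU_DDN_DEFINE(i2s_example_clk, pll1_d8_307p2, 0x30, 0, 16, 16, 12, 0);
+ *
+ * defines an M/N clock fed by pll1_d8_307p2 whose numerator field occupies
+ * bits [15:0] and whose denominator field occupies bits [27:16] of the
+ * register at offset 0x30.
+ */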
+
+static inline struct ccu_ddn *hw_to_ccu_ddn(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+
+ return container_of(common, struct ccu_ddn, common);
+}
+
+extern const struct clk_ops spacemit_ccu_ddn_ops;
+
+#endif
diff --git a/drivers/clk/spacemit/ccu_mix.c b/drivers/clk/spacemit/ccu_mix.c
new file mode 100644
index 000000000000..9b852aa61f78
--- /dev/null
+++ b/drivers/clk/spacemit/ccu_mix.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ *
+ * The MIX clock type is a combination of a mux, a factor or divider, and a gate
+ */
+
+#include <linux/clk-provider.h>
+
+#include "ccu_mix.h"
+
+#define MIX_FC_TIMEOUT_US 10000
+#define MIX_FC_DELAY_US 5
+
+static void ccu_gate_disable(struct clk_hw *hw)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+
+ ccu_update(&mix->common, ctrl, mix->gate.mask, 0);
+}
+
+static int ccu_gate_enable(struct clk_hw *hw)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+ struct ccu_gate_config *gate = &mix->gate;
+
+ ccu_update(&mix->common, ctrl, gate->mask, gate->mask);
+
+ return 0;
+}
+
+static int ccu_gate_is_enabled(struct clk_hw *hw)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+ struct ccu_gate_config *gate = &mix->gate;
+
+ return (ccu_read(&mix->common, ctrl) & gate->mask) == gate->mask;
+}
+
+static unsigned long ccu_factor_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+
+ return parent_rate * mix->factor.mul / mix->factor.div;
+}
+
+static unsigned long ccu_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+ struct ccu_div_config *div = &mix->div;
+ unsigned long val;
+
+ val = ccu_read(&mix->common, ctrl) >> div->shift;
+ val &= (1 << div->width) - 1;
+
+ return divider_recalc_rate(hw, parent_rate, val, NULL, 0, div->width);
+}
+
+/*
+ * Some clocks require an "FC" (frequency change) bit to be set after changing
+ * their rate or parent. The hardware clears this bit automatically once the
+ * change has taken effect; the driver polls for up to MIX_FC_TIMEOUT_US for
+ * that to happen.
+ */
+static int ccu_mix_trigger_fc(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+ unsigned int val;
+
+ if (!common->reg_fc)
+ return 0;
+
+ ccu_update(common, fc, common->mask_fc, common->mask_fc);
+
+ return regmap_read_poll_timeout_atomic(common->regmap, common->reg_fc,
+ val, !(val & common->mask_fc),
+ MIX_FC_DELAY_US,
+ MIX_FC_TIMEOUT_US);
+}
+
+static long ccu_factor_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ return ccu_factor_recalc_rate(hw, *prate);
+}
+
+static int ccu_factor_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return 0;
+}
+
+static unsigned long
+ccu_mix_calc_best_rate(struct clk_hw *hw, unsigned long rate,
+ struct clk_hw **best_parent,
+ unsigned long *best_parent_rate,
+ u32 *div_val)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+ unsigned int parent_num = clk_hw_get_num_parents(hw);
+ struct ccu_div_config *div = &mix->div;
+ u32 div_max = 1 << div->width;
+ unsigned long best_rate = 0;
+
+ for (int i = 0; i < parent_num; i++) {
+ struct clk_hw *parent = clk_hw_get_parent_by_index(hw, i);
+ unsigned long parent_rate;
+
+ if (!parent)
+ continue;
+
+ parent_rate = clk_hw_get_rate(parent);
+
+ for (int j = 1; j <= div_max; j++) {
+ unsigned long tmp = DIV_ROUND_CLOSEST_ULL(parent_rate, j);
+
+ if (abs(tmp - rate) < abs(best_rate - rate)) {
+ best_rate = tmp;
+
+ if (div_val)
+ *div_val = j - 1;
+
+ if (best_parent) {
+ *best_parent = parent;
+ *best_parent_rate = parent_rate;
+ }
+ }
+ }
+ }
+
+ return best_rate;
+}
+
+static int ccu_mix_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ req->rate = ccu_mix_calc_best_rate(hw, req->rate,
+ &req->best_parent_hw,
+ &req->best_parent_rate,
+ NULL);
+ return 0;
+}
+
+static int ccu_mix_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+ struct ccu_common *common = &mix->common;
+ struct ccu_div_config *div = &mix->div;
+ u32 current_div, target_div, mask;
+
+ ccu_mix_calc_best_rate(hw, rate, NULL, NULL, &target_div);
+
+ current_div = ccu_read(common, ctrl) >> div->shift;
+ current_div &= (1 << div->width) - 1;
+
+ if (current_div == target_div)
+ return 0;
+
+ mask = GENMASK(div->width + div->shift - 1, div->shift);
+
+ ccu_update(common, ctrl, mask, target_div << div->shift);
+
+ return ccu_mix_trigger_fc(hw);
+}
+
+static u8 ccu_mux_get_parent(struct clk_hw *hw)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+ struct ccu_mux_config *mux = &mix->mux;
+ u8 parent;
+
+ parent = ccu_read(&mix->common, ctrl) >> mux->shift;
+ parent &= (1 << mux->width) - 1;
+
+ return parent;
+}
+
+static int ccu_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+ struct ccu_mux_config *mux = &mix->mux;
+ u32 mask;
+
+ mask = GENMASK(mux->width + mux->shift - 1, mux->shift);
+
+ ccu_update(&mix->common, ctrl, mask, index << mux->shift);
+
+ return ccu_mix_trigger_fc(hw);
+}
+
+const struct clk_ops spacemit_ccu_gate_ops = {
+ .disable = ccu_gate_disable,
+ .enable = ccu_gate_enable,
+ .is_enabled = ccu_gate_is_enabled,
+};
+
+const struct clk_ops spacemit_ccu_factor_ops = {
+ .round_rate = ccu_factor_round_rate,
+ .recalc_rate = ccu_factor_recalc_rate,
+ .set_rate = ccu_factor_set_rate,
+};
+
+const struct clk_ops spacemit_ccu_mux_ops = {
+ .determine_rate = ccu_mix_determine_rate,
+ .get_parent = ccu_mux_get_parent,
+ .set_parent = ccu_mux_set_parent,
+};
+
+const struct clk_ops spacemit_ccu_div_ops = {
+ .determine_rate = ccu_mix_determine_rate,
+ .recalc_rate = ccu_div_recalc_rate,
+ .set_rate = ccu_mix_set_rate,
+};
+
+const struct clk_ops spacemit_ccu_factor_gate_ops = {
+ .disable = ccu_gate_disable,
+ .enable = ccu_gate_enable,
+ .is_enabled = ccu_gate_is_enabled,
+
+ .round_rate = ccu_factor_round_rate,
+ .recalc_rate = ccu_factor_recalc_rate,
+ .set_rate = ccu_factor_set_rate,
+};
+
+const struct clk_ops spacemit_ccu_mux_gate_ops = {
+ .disable = ccu_gate_disable,
+ .enable = ccu_gate_enable,
+ .is_enabled = ccu_gate_is_enabled,
+
+ .determine_rate = ccu_mix_determine_rate,
+ .get_parent = ccu_mux_get_parent,
+ .set_parent = ccu_mux_set_parent,
+};
+
+const struct clk_ops spacemit_ccu_div_gate_ops = {
+ .disable = ccu_gate_disable,
+ .enable = ccu_gate_enable,
+ .is_enabled = ccu_gate_is_enabled,
+
+ .determine_rate = ccu_mix_determine_rate,
+ .recalc_rate = ccu_div_recalc_rate,
+ .set_rate = ccu_mix_set_rate,
+};
+
+const struct clk_ops spacemit_ccu_mux_div_gate_ops = {
+ .disable = ccu_gate_disable,
+ .enable = ccu_gate_enable,
+ .is_enabled = ccu_gate_is_enabled,
+
+ .get_parent = ccu_mux_get_parent,
+ .set_parent = ccu_mux_set_parent,
+
+ .determine_rate = ccu_mix_determine_rate,
+ .recalc_rate = ccu_div_recalc_rate,
+ .set_rate = ccu_mix_set_rate,
+};
+
+const struct clk_ops spacemit_ccu_mux_div_ops = {
+ .get_parent = ccu_mux_get_parent,
+ .set_parent = ccu_mux_set_parent,
+
+ .determine_rate = ccu_mix_determine_rate,
+ .recalc_rate = ccu_div_recalc_rate,
+ .set_rate = ccu_mix_set_rate,
+};
diff --git a/drivers/clk/spacemit/ccu_mix.h b/drivers/clk/spacemit/ccu_mix.h
new file mode 100644
index 000000000000..51d19f5d6aac
--- /dev/null
+++ b/drivers/clk/spacemit/ccu_mix.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ */
+
+#ifndef _CCU_MIX_H_
+#define _CCU_MIX_H_
+
+#include <linux/clk-provider.h>
+
+#include "ccu_common.h"
+
+/**
+ * struct ccu_gate_config - Gate configuration
+ *
+ * @mask: Mask to enable the gate. Some clocks may have more than one bit
+ * set in this field.
+ */
+struct ccu_gate_config {
+ u32 mask;
+};
+
+struct ccu_factor_config {
+ u32 div;
+ u32 mul;
+};
+
+struct ccu_mux_config {
+ u8 shift;
+ u8 width;
+};
+
+struct ccu_div_config {
+ u8 shift;
+ u8 width;
+};
+
+struct ccu_mix {
+ struct ccu_factor_config factor;
+ struct ccu_gate_config gate;
+ struct ccu_div_config div;
+ struct ccu_mux_config mux;
+ struct ccu_common common;
+};
+
+#define CCU_GATE_INIT(_mask) { .mask = _mask }
+#define CCU_FACTOR_INIT(_div, _mul) { .div = _div, .mul = _mul }
+#define CCU_MUX_INIT(_shift, _width) { .shift = _shift, .width = _width }
+#define CCU_DIV_INIT(_shift, _width) { .shift = _shift, .width = _width }
+
+#define CCU_PARENT_HW(_parent) { .hw = &_parent.common.hw }
+#define CCU_PARENT_NAME(_name) { .fw_name = #_name }
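+
+/*
+ * CCU_PARENT_HW() references another clock defined in this driver through its
+ * struct, while CCU_PARENT_NAME() references an external clock by the
+ * firmware (device tree) name, as done for vctcxo_24m in ccu-k1.c.
+ */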
+
+#define CCU_MIX_INITHW(_name, _parent, _ops, _flags) \
+ .hw.init = &(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = #_name, \
+ .parent_data = (const struct clk_parent_data[]) \
+ { _parent }, \
+ .num_parents = 1, \
+ .ops = &_ops, \
+ }
+
+#define CCU_MIX_INITHW_PARENTS(_name, _parents, _ops, _flags) \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(#_name, _parents, &_ops, _flags)
+
+#define CCU_GATE_DEFINE(_name, _parent, _reg_ctrl, _mask_gate, _flags) \
+static struct ccu_mix _name = { \
+ .gate = CCU_GATE_INIT(_mask_gate), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ CCU_MIX_INITHW(_name, _parent, spacemit_ccu_gate_ops, _flags), \
+ } \
+}
+
+#define CCU_FACTOR_DEFINE(_name, _parent, _div, _mul) \
+static struct ccu_mix _name = { \
+ .factor = CCU_FACTOR_INIT(_div, _mul), \
+ .common = { \
+ CCU_MIX_INITHW(_name, _parent, spacemit_ccu_factor_ops, 0), \
+ } \
+}
+
+#define CCU_MUX_DEFINE(_name, _parents, _reg_ctrl, _shift, _width, _flags) \
+static struct ccu_mix _name = { \
+ .mux = CCU_MUX_INIT(_shift, _width), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ CCU_MIX_INITHW_PARENTS(_name, _parents, spacemit_ccu_mux_ops, \
+ _flags), \
+ } \
+}
+
+#define CCU_DIV_DEFINE(_name, _parent, _reg_ctrl, _shift, _width, _flags) \
+static struct ccu_mix _name = { \
+ .div = CCU_DIV_INIT(_shift, _width), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ CCU_MIX_INITHW(_name, _parent, spacemit_ccu_div_ops, _flags) \
+ } \
+}
+
+#define CCU_FACTOR_GATE_DEFINE(_name, _parent, _reg_ctrl, _mask_gate, _div, \
+ _mul) \
+static struct ccu_mix _name = { \
+ .gate = CCU_GATE_INIT(_mask_gate), \
+ .factor = CCU_FACTOR_INIT(_div, _mul), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ CCU_MIX_INITHW(_name, _parent, spacemit_ccu_factor_gate_ops, 0) \
+ } \
+}
+
+#define CCU_MUX_GATE_DEFINE(_name, _parents, _reg_ctrl, _shift, _width, \
+ _mask_gate, _flags) \
+static struct ccu_mix _name = { \
+ .gate = CCU_GATE_INIT(_mask_gate), \
+ .mux = CCU_MUX_INIT(_shift, _width), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ CCU_MIX_INITHW_PARENTS(_name, _parents, \
+ spacemit_ccu_mux_gate_ops, _flags), \
+ } \
+}
+
+#define CCU_DIV_GATE_DEFINE(_name, _parent, _reg_ctrl, _shift, _width, \
+ _mask_gate, _flags) \
+static struct ccu_mix _name = { \
+ .gate = CCU_GATE_INIT(_mask_gate), \
+ .div = CCU_DIV_INIT(_shift, _width), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ CCU_MIX_INITHW(_name, _parent, spacemit_ccu_div_gate_ops, \
+ _flags), \
+ } \
+}
+
+#define CCU_MUX_DIV_GATE_DEFINE(_name, _parents, _reg_ctrl, _mshift, _mwidth, \
+ _muxshift, _muxwidth, _mask_gate, _flags) \
+static struct ccu_mix _name = { \
+ .gate = CCU_GATE_INIT(_mask_gate), \
+ .div = CCU_DIV_INIT(_mshift, _mwidth), \
+ .mux = CCU_MUX_INIT(_muxshift, _muxwidth), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ CCU_MIX_INITHW_PARENTS(_name, _parents, \
+ spacemit_ccu_mux_div_gate_ops, _flags), \
+ }, \
+}
+
+#define CCU_MUX_DIV_GATE_SPLIT_FC_DEFINE(_name, _parents, _reg_ctrl, _reg_fc, \
+ _mshift, _mwidth, _mask_fc, _muxshift, \
+ _muxwidth, _mask_gate, _flags) \
+static struct ccu_mix _name = { \
+ .gate = CCU_GATE_INIT(_mask_gate), \
+ .div = CCU_DIV_INIT(_mshift, _mwidth), \
+ .mux = CCU_MUX_INIT(_muxshift, _muxwidth), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ .reg_fc = _reg_fc, \
+ .mask_fc = _mask_fc, \
+ CCU_MIX_INITHW_PARENTS(_name, _parents, \
+ spacemit_ccu_mux_div_gate_ops, _flags), \
+ }, \
+}
+
+#define CCU_MUX_DIV_GATE_FC_DEFINE(_name, _parents, _reg_ctrl, _mshift, _mwidth,\
+ _mask_fc, _muxshift, _muxwidth, _mask_gate, \
+ _flags) \
+CCU_MUX_DIV_GATE_SPLIT_FC_DEFINE(_name, _parents, _reg_ctrl, _reg_ctrl, _mshift,\
+ _mwidth, _mask_fc, _muxshift, _muxwidth, \
+ _mask_gate, _flags)
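+
+/*
+ * Illustrative example (the register offset and bit positions are invented,
+ * they do not correspond to a real K1 clock): a clock with a 3-bit divider at
+ * bit 8, an FC bit at bit 11, a 3-bit mux at bit 5 and a gate bit at bit 4,
+ * all in one register at offset 0x118, would be declared as
+ *
+ *	CCU_MUX_DIV_GATE_FC_DEFINE(foo_clk, foo_parents, 0x118, 8, 3, BIT(11),
+ *				   5, 3, BIT(4), 0);
+ */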
+
+#define CCU_MUX_DIV_FC_DEFINE(_name, _parents, _reg_ctrl, _mshift, _mwidth, \
+ _mask_fc, _muxshift, _muxwidth, _flags) \
+static struct ccu_mix _name = { \
+ .div = CCU_DIV_INIT(_mshift, _mwidth), \
+ .mux = CCU_MUX_INIT(_muxshift, _muxwidth), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ .reg_fc = _reg_ctrl, \
+ .mask_fc = _mask_fc, \
+ CCU_MIX_INITHW_PARENTS(_name, _parents, \
+ spacemit_ccu_mux_div_ops, _flags), \
+ }, \
+}
+
+#define CCU_MUX_FC_DEFINE(_name, _parents, _reg_ctrl, _mask_fc, _muxshift, \
+ _muxwidth, _flags) \
+static struct ccu_mix _name = { \
+ .mux = CCU_MUX_INIT(_muxshift, _muxwidth), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ .reg_fc = _reg_ctrl, \
+ .mask_fc = _mask_fc, \
+ CCU_MIX_INITHW_PARENTS(_name, _parents, spacemit_ccu_mux_ops, \
+ _flags) \
+ }, \
+}
+
+static inline struct ccu_mix *hw_to_ccu_mix(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+
+ return container_of(common, struct ccu_mix, common);
+}
+
+extern const struct clk_ops spacemit_ccu_gate_ops;
+extern const struct clk_ops spacemit_ccu_factor_ops;
+extern const struct clk_ops spacemit_ccu_mux_ops;
+extern const struct clk_ops spacemit_ccu_div_ops;
+extern const struct clk_ops spacemit_ccu_factor_gate_ops;
+extern const struct clk_ops spacemit_ccu_div_gate_ops;
+extern const struct clk_ops spacemit_ccu_mux_gate_ops;
+extern const struct clk_ops spacemit_ccu_mux_div_ops;
+extern const struct clk_ops spacemit_ccu_mux_div_gate_ops;
+#endif /* _CCU_MIX_H_ */
diff --git a/drivers/clk/spacemit/ccu_pll.c b/drivers/clk/spacemit/ccu_pll.c
new file mode 100644
index 000000000000..4427dcfbbb97
--- /dev/null
+++ b/drivers/clk/spacemit/ccu_pll.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/math.h>
+#include <linux/regmap.h>
+
+#include "ccu_common.h"
+#include "ccu_pll.h"
+
+#define PLL_TIMEOUT_US 3000
+#define PLL_DELAY_US 5
+
+#define PLL_SWCR3_EN ((u32)BIT(31))
+#define PLL_SWCR3_MASK GENMASK(30, 0)
+
+static const struct ccu_pll_rate_tbl *ccu_pll_lookup_best_rate(struct ccu_pll *pll,
+ unsigned long rate)
+{
+ struct ccu_pll_config *config = &pll->config;
+ const struct ccu_pll_rate_tbl *best_entry;
+ unsigned long best_delta = ULONG_MAX;
+ int i;
+
+ for (i = 0; i < config->tbl_num; i++) {
+ const struct ccu_pll_rate_tbl *entry = &config->rate_tbl[i];
+ unsigned long delta = abs_diff(entry->rate, rate);
+
+ if (delta < best_delta) {
+ best_delta = delta;
+ best_entry = entry;
+ }
+ }
+
+ return best_entry;
+}
+
+static const struct ccu_pll_rate_tbl *ccu_pll_lookup_matched_entry(struct ccu_pll *pll)
+{
+ struct ccu_pll_config *config = &pll->config;
+ u32 swcr1, swcr3;
+ int i;
+
+ swcr1 = ccu_read(&pll->common, swcr1);
+ swcr3 = ccu_read(&pll->common, swcr3);
+ swcr3 &= PLL_SWCR3_MASK;
+
+ for (i = 0; i < config->tbl_num; i++) {
+ const struct ccu_pll_rate_tbl *entry = &config->rate_tbl[i];
+
+ if (swcr1 == entry->swcr1 && swcr3 == entry->swcr3)
+ return entry;
+ }
+
+ return NULL;
+}
+
+static void ccu_pll_update_param(struct ccu_pll *pll, const struct ccu_pll_rate_tbl *entry)
+{
+ struct ccu_common *common = &pll->common;
+
+ regmap_write(common->regmap, common->reg_swcr1, entry->swcr1);
+ ccu_update(common, swcr3, PLL_SWCR3_MASK, entry->swcr3);
+}
+
+static int ccu_pll_is_enabled(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+
+ return ccu_read(common, swcr3) & PLL_SWCR3_EN;
+}
+
+static int ccu_pll_enable(struct clk_hw *hw)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+ struct ccu_common *common = &pll->common;
+ unsigned int tmp;
+
+ ccu_update(common, swcr3, PLL_SWCR3_EN, PLL_SWCR3_EN);
+
+ /* check lock status */
+ return regmap_read_poll_timeout_atomic(common->lock_regmap,
+ pll->config.reg_lock,
+ tmp,
+ tmp & pll->config.mask_lock,
+ PLL_DELAY_US, PLL_TIMEOUT_US);
+}
+
+static void ccu_pll_disable(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+
+ ccu_update(common, swcr3, PLL_SWCR3_EN, 0);
+}
+
+/*
+ * PLLs must be gated before changing rate, which is ensured by
+ * flag CLK_SET_RATE_GATE.
+ */
+static int ccu_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+ const struct ccu_pll_rate_tbl *entry;
+
+ entry = ccu_pll_lookup_best_rate(pll, rate);
+ ccu_pll_update_param(pll, entry);
+
+ return 0;
+}
+
+static unsigned long ccu_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+ const struct ccu_pll_rate_tbl *entry;
+
+ entry = ccu_pll_lookup_matched_entry(pll);
+
+ WARN_ON_ONCE(!entry);
+
+ return entry ? entry->rate : -EINVAL;
+}
+
+static long ccu_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+
+ return ccu_pll_lookup_best_rate(pll, rate)->rate;
+}
+
+static int ccu_pll_init(struct clk_hw *hw)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+
+ if (ccu_pll_lookup_matched_entry(pll))
+ return 0;
+
+ ccu_pll_disable(hw);
+ ccu_pll_update_param(pll, &pll->config.rate_tbl[0]);
+
+ return 0;
+}
+
+const struct clk_ops spacemit_ccu_pll_ops = {
+ .init = ccu_pll_init,
+ .enable = ccu_pll_enable,
+ .disable = ccu_pll_disable,
+ .set_rate = ccu_pll_set_rate,
+ .recalc_rate = ccu_pll_recalc_rate,
+ .round_rate = ccu_pll_round_rate,
+ .is_enabled = ccu_pll_is_enabled,
+};
diff --git a/drivers/clk/spacemit/ccu_pll.h b/drivers/clk/spacemit/ccu_pll.h
new file mode 100644
index 000000000000..0592f4c3068c
--- /dev/null
+++ b/drivers/clk/spacemit/ccu_pll.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ */
+
+#ifndef _CCU_PLL_H_
+#define _CCU_PLL_H_
+
+#include <linux/clk-provider.h>
+
+#include "ccu_common.h"
+
+/**
+ * struct ccu_pll_rate_tbl - Structure mapping between PLL rate and register
+ * configuration.
+ *
+ * @rate: PLL rate
+ * @swcr1: Register value of PLLx_SW1_CTRL (PLLx_SWCR1).
+ * @swcr3: Value of the lowest 31 bits of PLLx_SW3_CTRL (PLLx_SWCR3). The
+ * highest bit enables the PLL and is therefore not contained in this
+ * field.
+ */
+struct ccu_pll_rate_tbl {
+ unsigned long rate;
+ u32 swcr1;
+ u32 swcr3;
+};
+
+struct ccu_pll_config {
+ const struct ccu_pll_rate_tbl *rate_tbl;
+ u32 tbl_num;
+ u32 reg_lock;
+ u32 mask_lock;
+};
+
+#define CCU_PLL_RATE(_rate, _swcr1, _swcr3) \
+ { \
+ .rate = _rate, \
+ .swcr1 = _swcr1, \
+ .swcr3 = _swcr3, \
+ }
+
+struct ccu_pll {
+ struct ccu_common common;
+ struct ccu_pll_config config;
+};
+
+#define CCU_PLL_CONFIG(_table, _reg_lock, _mask_lock) \
+ { \
+ .rate_tbl = _table, \
+ .tbl_num = ARRAY_SIZE(_table), \
+ .reg_lock = (_reg_lock), \
+ .mask_lock = (_mask_lock), \
+ }
+
+#define CCU_PLL_HWINIT(_name, _flags) \
+ (&(struct clk_init_data) { \
+ .name = #_name, \
+ .ops = &spacemit_ccu_pll_ops, \
+ .parent_data = &(struct clk_parent_data) { .index = 0 }, \
+ .num_parents = 1, \
+ .flags = _flags, \
+ })
+
+#define CCU_PLL_DEFINE(_name, _table, _reg_swcr1, _reg_swcr3, _reg_lock, \
+ _mask_lock, _flags) \
+static struct ccu_pll _name = { \
+ .config = CCU_PLL_CONFIG(_table, _reg_lock, _mask_lock), \
+ .common = { \
+ .reg_swcr1 = _reg_swcr1, \
+ .reg_swcr3 = _reg_swcr3, \
+ .hw.init = CCU_PLL_HWINIT(_name, _flags) \
+ } \
+}
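+
+/*
+ * Hypothetical example (all offsets, masks and SWCR values below are invented
+ * for illustration):
+ *
+ *	static const struct ccu_pll_rate_tbl example_pll_rates[] = {
+ *		CCU_PLL_RATE(2457600000UL, 0x0050dd64, 0x330ccccd),
+ *	};
+ *	CCU_PLL_DEFINE(example_pll, example_pll_rates, 0x100, 0x108, 0x248,
+ *		       BIT(31), CLK_SET_RATE_GATE);
+ *
+ * CLK_SET_RATE_GATE matters here because, as noted in ccu_pll.c, the PLL must
+ * be gated before its rate is changed.
+ */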
+
+static inline struct ccu_pll *hw_to_ccu_pll(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+
+ return container_of(common, struct ccu_pll, common);
+}
+
+extern const struct clk_ops spacemit_ccu_pll_ops;
+
+#endif
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
index 5830a9d87bf2..8896fd052ef1 100644
--- a/drivers/clk/sunxi-ng/Kconfig
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -9,123 +9,123 @@ if SUNXI_CCU
config SUNIV_F1C100S_CCU
tristate "Support for the Allwinner newer F1C100s CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUNIV || COMPILE_TEST
config SUN20I_D1_CCU
tristate "Support for the Allwinner D1/R528/T113 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || RISCV || COMPILE_TEST
config SUN20I_D1_R_CCU
tristate "Support for the Allwinner D1/R528/T113 PRCM CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || RISCV || COMPILE_TEST
config SUN50I_A64_CCU
tristate "Support for the Allwinner A64 CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN50I_A100_CCU
tristate "Support for the Allwinner A100 CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN50I_A100_R_CCU
tristate "Support for the Allwinner A100 PRCM CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN50I_H6_CCU
tristate "Support for the Allwinner H6 CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN50I_H616_CCU
tristate "Support for the Allwinner H616 CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN50I_H6_R_CCU
tristate "Support for the Allwinner H6 and H616 PRCM CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN55I_A523_CCU
tristate "Support for the Allwinner A523/T527 CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN55I_A523_R_CCU
tristate "Support for the Allwinner A523/T527 PRCM CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN4I_A10_CCU
tristate "Support for the Allwinner A10/A20 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST
config SUN5I_CCU
bool "Support for the Allwinner sun5i family CCM"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN5I || COMPILE_TEST
depends on SUNXI_CCU=y
config SUN6I_A31_CCU
tristate "Support for the Allwinner A31/A31s CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN6I || COMPILE_TEST
config SUN6I_RTC_CCU
tristate "Support for the Allwinner H616/R329 RTC CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || ARM64 || RISCV || COMPILE_TEST
config SUN8I_A23_CCU
tristate "Support for the Allwinner A23 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || COMPILE_TEST
config SUN8I_A33_CCU
tristate "Support for the Allwinner A33 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || COMPILE_TEST
config SUN8I_A83T_CCU
tristate "Support for the Allwinner A83T CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || COMPILE_TEST
config SUN8I_H3_CCU
tristate "Support for the Allwinner H3 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || ARM64 || COMPILE_TEST
config SUN8I_V3S_CCU
tristate "Support for the Allwinner V3s CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || COMPILE_TEST
config SUN8I_DE2_CCU
tristate "Support for the Allwinner SoCs DE2 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || ARM64 || RISCV || COMPILE_TEST
config SUN8I_R40_CCU
tristate "Support for the Allwinner R40 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || COMPILE_TEST
config SUN9I_A80_CCU
tristate "Support for the Allwinner A80 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN9I || COMPILE_TEST
config SUN8I_R_CCU
tristate "Support for Allwinner SoCs' PRCM CCUs"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || ARM64 || COMPILE_TEST
endif
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
index daa462c7d477..955c614830fa 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
@@ -1094,6 +1094,7 @@ static const struct ccu_reset_map sun50i_h616_ccu_resets[] = {
[RST_BUS_TCON_LCD1] = { 0xb7c, BIT(17) },
[RST_BUS_TCON_TV0] = { 0xb9c, BIT(16) },
[RST_BUS_TCON_TV1] = { 0xb9c, BIT(17) },
+ [RST_BUS_LVDS] = { 0xbac, BIT(16) },
[RST_BUS_TVE_TOP] = { 0xbbc, BIT(16) },
[RST_BUS_TVE0] = { 0xbbc, BIT(17) },
[RST_BUS_HDCP] = { 0xc4c, BIT(16) },
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
index f2aa71206bc2..a6cd0f988859 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -239,6 +240,16 @@ static const struct sunxi_ccu_desc sun50i_h5_de2_clk_desc = {
.num_resets = ARRAY_SIZE(sun50i_h5_de2_resets),
};
+static const struct sunxi_ccu_desc sun50i_h616_de33_clk_desc = {
+ .ccu_clks = sun8i_de2_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_de2_ccu_clks),
+
+ .hw_clks = &sun8i_h3_de2_hw_clks,
+
+ .resets = sun50i_h5_de2_resets,
+ .num_resets = ARRAY_SIZE(sun50i_h5_de2_resets),
+};
+
static int sunxi_de2_clk_probe(struct platform_device *pdev)
{
struct clk *bus_clk, *mod_clk;
@@ -291,6 +302,16 @@ static int sunxi_de2_clk_probe(struct platform_device *pdev)
goto err_disable_mod_clk;
}
+ /*
+ * The DE33 requires these additional (unknown) registers set
+ * during initialisation.
+ */
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "allwinner,sun50i-h616-de33-clk")) {
+ writel(0, reg + 0x24);
+ writel(0x0000a980, reg + 0x28);
+ }
+
ret = devm_sunxi_ccu_probe(&pdev->dev, reg, ccu_desc);
if (ret)
goto err_assert_reset;
@@ -335,6 +356,10 @@ static const struct of_device_id sunxi_de2_clk_ids[] = {
.compatible = "allwinner,sun50i-h6-de3-clk",
.data = &sun50i_h5_de2_clk_desc,
},
+ {
+ .compatible = "allwinner,sun50i-h616-de33-clk",
+ .data = &sun50i_h616_de33_clk_desc,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, sunxi_de2_clk_ids);
diff --git a/drivers/clk/sunxi/Kconfig b/drivers/clk/sunxi/Kconfig
index 1c4e543366dd..5e2f92bfe412 100644
--- a/drivers/clk/sunxi/Kconfig
+++ b/drivers/clk/sunxi/Kconfig
@@ -2,13 +2,13 @@
menuconfig CLK_SUNXI
bool "Legacy clock support for Allwinner SoCs"
depends on (ARM && ARCH_SUNXI) || COMPILE_TEST
- default y
+ default (ARM && ARCH_SUNXI)
if CLK_SUNXI
config CLK_SUNXI_CLOCKS
bool "Legacy clock drivers"
- default y
+ default ARCH_SUNXI
help
Legacy clock drivers being used on older (A10, A13, A20,
A23, A31, A80) SoCs. These drivers are kept around for
@@ -19,14 +19,14 @@ config CLK_SUNXI_CLOCKS
config CLK_SUNXI_PRCM_SUN6I
bool "Legacy A31 PRCM driver"
- default y
+ default ARCH_SUNXI
help
Legacy clock driver for the A31 PRCM clocks. Those are
usually needed for the PMIC communication, mostly.
config CLK_SUNXI_PRCM_SUN8I
bool "Legacy sun8i PRCM driver"
- default y
+ default ARCH_SUNXI
help
Legacy clock driver for the sun8i family PRCM clocks.
Those are usually needed for the PMIC communication,
@@ -34,7 +34,7 @@ config CLK_SUNXI_PRCM_SUN8I
config CLK_SUNXI_PRCM_SUN9I
bool "Legacy A80 PRCM driver"
- default y
+ default ARCH_SUNXI
help
Legacy clock driver for the A80 PRCM clocks. Those are
usually needed for the PMIC communication, mostly.
diff --git a/drivers/clk/thead/clk-th1520-ap.c b/drivers/clk/thead/clk-th1520-ap.c
index 4c9555fc6184..ebfb1d59401d 100644
--- a/drivers/clk/thead/clk-th1520-ap.c
+++ b/drivers/clk/thead/clk-th1520-ap.c
@@ -847,6 +847,67 @@ static CCU_GATE(CLK_SRAM1, sram1_clk, "sram1", axi_aclk_pd, 0x20c, BIT(3), 0);
static CCU_GATE(CLK_SRAM2, sram2_clk, "sram2", axi_aclk_pd, 0x20c, BIT(2), 0);
static CCU_GATE(CLK_SRAM3, sram3_clk, "sram3", axi_aclk_pd, 0x20c, BIT(1), 0);
+static CCU_GATE(CLK_AXI4_VO_ACLK, axi4_vo_aclk, "axi4-vo-aclk",
+ video_pll_clk_pd, 0x0, BIT(0), 0);
+static CCU_GATE(CLK_GPU_CORE, gpu_core_clk, "gpu-core-clk", video_pll_clk_pd,
+ 0x0, BIT(3), 0);
+static CCU_GATE(CLK_GPU_CFG_ACLK, gpu_cfg_aclk, "gpu-cfg-aclk",
+ video_pll_clk_pd, 0x0, BIT(4), 0);
+static CCU_GATE(CLK_DPU_PIXELCLK0, dpu0_pixelclk, "dpu0-pixelclk",
+ video_pll_clk_pd, 0x0, BIT(5), 0);
+static CCU_GATE(CLK_DPU_PIXELCLK1, dpu1_pixelclk, "dpu1-pixelclk",
+ video_pll_clk_pd, 0x0, BIT(6), 0);
+static CCU_GATE(CLK_DPU_HCLK, dpu_hclk, "dpu-hclk", video_pll_clk_pd, 0x0,
+ BIT(7), 0);
+static CCU_GATE(CLK_DPU_ACLK, dpu_aclk, "dpu-aclk", video_pll_clk_pd, 0x0,
+ BIT(8), 0);
+static CCU_GATE(CLK_DPU_CCLK, dpu_cclk, "dpu-cclk", video_pll_clk_pd, 0x0,
+ BIT(9), 0);
+static CCU_GATE(CLK_HDMI_SFR, hdmi_sfr_clk, "hdmi-sfr-clk", video_pll_clk_pd,
+ 0x0, BIT(10), 0);
+static CCU_GATE(CLK_HDMI_PCLK, hdmi_pclk, "hdmi-pclk", video_pll_clk_pd, 0x0,
+ BIT(11), 0);
+static CCU_GATE(CLK_HDMI_CEC, hdmi_cec_clk, "hdmi-cec-clk", video_pll_clk_pd,
+ 0x0, BIT(12), 0);
+static CCU_GATE(CLK_MIPI_DSI0_PCLK, mipi_dsi0_pclk, "mipi-dsi0-pclk",
+ video_pll_clk_pd, 0x0, BIT(13), 0);
+static CCU_GATE(CLK_MIPI_DSI1_PCLK, mipi_dsi1_pclk, "mipi-dsi1-pclk",
+ video_pll_clk_pd, 0x0, BIT(14), 0);
+static CCU_GATE(CLK_MIPI_DSI0_CFG, mipi_dsi0_cfg_clk, "mipi-dsi0-cfg-clk",
+ video_pll_clk_pd, 0x0, BIT(15), 0);
+static CCU_GATE(CLK_MIPI_DSI1_CFG, mipi_dsi1_cfg_clk, "mipi-dsi1-cfg-clk",
+ video_pll_clk_pd, 0x0, BIT(16), 0);
+static CCU_GATE(CLK_MIPI_DSI0_REFCLK, mipi_dsi0_refclk, "mipi-dsi0-refclk",
+ video_pll_clk_pd, 0x0, BIT(17), 0);
+static CCU_GATE(CLK_MIPI_DSI1_REFCLK, mipi_dsi1_refclk, "mipi-dsi1-refclk",
+ video_pll_clk_pd, 0x0, BIT(18), 0);
+static CCU_GATE(CLK_HDMI_I2S, hdmi_i2s_clk, "hdmi-i2s-clk", video_pll_clk_pd,
+ 0x0, BIT(19), 0);
+static CCU_GATE(CLK_X2H_DPU1_ACLK, x2h_dpu1_aclk, "x2h-dpu1-aclk",
+ video_pll_clk_pd, 0x0, BIT(20), 0);
+static CCU_GATE(CLK_X2H_DPU_ACLK, x2h_dpu_aclk, "x2h-dpu-aclk",
+ video_pll_clk_pd, 0x0, BIT(21), 0);
+static CCU_GATE(CLK_AXI4_VO_PCLK, axi4_vo_pclk, "axi4-vo-pclk",
+ video_pll_clk_pd, 0x0, BIT(22), 0);
+static CCU_GATE(CLK_IOPMP_VOSYS_DPU_PCLK, iopmp_vosys_dpu_pclk,
+ "iopmp-vosys-dpu-pclk", video_pll_clk_pd, 0x0, BIT(23), 0);
+static CCU_GATE(CLK_IOPMP_VOSYS_DPU1_PCLK, iopmp_vosys_dpu1_pclk,
+ "iopmp-vosys-dpu1-pclk", video_pll_clk_pd, 0x0, BIT(24), 0);
+static CCU_GATE(CLK_IOPMP_VOSYS_GPU_PCLK, iopmp_vosys_gpu_pclk,
+ "iopmp-vosys-gpu-pclk", video_pll_clk_pd, 0x0, BIT(25), 0);
+static CCU_GATE(CLK_IOPMP_DPU1_ACLK, iopmp_dpu1_aclk, "iopmp-dpu1-aclk",
+ video_pll_clk_pd, 0x0, BIT(27), 0);
+static CCU_GATE(CLK_IOPMP_DPU_ACLK, iopmp_dpu_aclk, "iopmp-dpu-aclk",
+ video_pll_clk_pd, 0x0, BIT(28), 0);
+static CCU_GATE(CLK_IOPMP_GPU_ACLK, iopmp_gpu_aclk, "iopmp-gpu-aclk",
+ video_pll_clk_pd, 0x0, BIT(29), 0);
+static CCU_GATE(CLK_MIPIDSI0_PIXCLK, mipi_dsi0_pixclk, "mipi-dsi0-pixclk",
+ video_pll_clk_pd, 0x0, BIT(30), 0);
+static CCU_GATE(CLK_MIPIDSI1_PIXCLK, mipi_dsi1_pixclk, "mipi-dsi1-pixclk",
+ video_pll_clk_pd, 0x0, BIT(31), 0);
+static CCU_GATE(CLK_HDMI_PIXCLK, hdmi_pixclk, "hdmi-pixclk", video_pll_clk_pd,
+ 0x4, BIT(0), 0);
+
static CLK_FIXED_FACTOR_HW(gmac_pll_clk_100m, "gmac-pll-clk-100m",
&gmac_pll_clk.common.hw, 10, 1, 0);
@@ -963,7 +1024,38 @@ static struct ccu_common *th1520_gate_clks[] = {
&sram3_clk.common,
};
-#define NR_CLKS (CLK_UART_SCLK + 1)
+static struct ccu_common *th1520_vo_gate_clks[] = {
+ &axi4_vo_aclk.common,
+ &gpu_core_clk.common,
+ &gpu_cfg_aclk.common,
+ &dpu0_pixelclk.common,
+ &dpu1_pixelclk.common,
+ &dpu_hclk.common,
+ &dpu_aclk.common,
+ &dpu_cclk.common,
+ &hdmi_sfr_clk.common,
+ &hdmi_pclk.common,
+ &hdmi_cec_clk.common,
+ &mipi_dsi0_pclk.common,
+ &mipi_dsi1_pclk.common,
+ &mipi_dsi0_cfg_clk.common,
+ &mipi_dsi1_cfg_clk.common,
+ &mipi_dsi0_refclk.common,
+ &mipi_dsi1_refclk.common,
+ &hdmi_i2s_clk.common,
+ &x2h_dpu1_aclk.common,
+ &x2h_dpu_aclk.common,
+ &axi4_vo_pclk.common,
+ &iopmp_vosys_dpu_pclk.common,
+ &iopmp_vosys_dpu1_pclk.common,
+ &iopmp_vosys_gpu_pclk.common,
+ &iopmp_dpu1_aclk.common,
+ &iopmp_dpu_aclk.common,
+ &iopmp_gpu_aclk.common,
+ &mipi_dsi0_pixclk.common,
+ &mipi_dsi1_pixclk.common,
+ &hdmi_pixclk.common
+};
static const struct regmap_config th1520_clk_regmap_config = {
.reg_bits = 32,
@@ -972,8 +1064,44 @@ static const struct regmap_config th1520_clk_regmap_config = {
.fast_io = true,
};
+struct th1520_plat_data {
+ struct ccu_common **th1520_pll_clks;
+ struct ccu_common **th1520_div_clks;
+ struct ccu_common **th1520_mux_clks;
+ struct ccu_common **th1520_gate_clks;
+
+ int nr_clks;
+ int nr_pll_clks;
+ int nr_div_clks;
+ int nr_mux_clks;
+ int nr_gate_clks;
+};
+
+static const struct th1520_plat_data th1520_ap_platdata = {
+ .th1520_pll_clks = th1520_pll_clks,
+ .th1520_div_clks = th1520_div_clks,
+ .th1520_mux_clks = th1520_mux_clks,
+ .th1520_gate_clks = th1520_gate_clks,
+
+ .nr_clks = CLK_UART_SCLK + 1,
+
+ .nr_pll_clks = ARRAY_SIZE(th1520_pll_clks),
+ .nr_div_clks = ARRAY_SIZE(th1520_div_clks),
+ .nr_mux_clks = ARRAY_SIZE(th1520_mux_clks),
+ .nr_gate_clks = ARRAY_SIZE(th1520_gate_clks),
+};
+
+static const struct th1520_plat_data th1520_vo_platdata = {
+ .th1520_gate_clks = th1520_vo_gate_clks,
+
+ .nr_clks = CLK_HDMI_PIXCLK + 1,
+
+ .nr_gate_clks = ARRAY_SIZE(th1520_vo_gate_clks),
+};
+
static int th1520_clk_probe(struct platform_device *pdev)
{
+ const struct th1520_plat_data *plat_data;
struct device *dev = &pdev->dev;
struct clk_hw_onecell_data *priv;
@@ -982,11 +1110,16 @@ static int th1520_clk_probe(struct platform_device *pdev)
struct clk_hw *hw;
int ret, i;
- priv = devm_kzalloc(dev, struct_size(priv, hws, NR_CLKS), GFP_KERNEL);
+ plat_data = device_get_match_data(&pdev->dev);
+ if (!plat_data)
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "No device match data found\n");
+
+ priv = devm_kzalloc(dev, struct_size(priv, hws, plat_data->nr_clks), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- priv->num = NR_CLKS;
+ priv->num = plat_data->nr_clks;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
@@ -996,35 +1129,35 @@ static int th1520_clk_probe(struct platform_device *pdev)
if (IS_ERR(map))
return PTR_ERR(map);
- for (i = 0; i < ARRAY_SIZE(th1520_pll_clks); i++) {
- struct ccu_pll *cp = hw_to_ccu_pll(&th1520_pll_clks[i]->hw);
+ for (i = 0; i < plat_data->nr_pll_clks; i++) {
+ struct ccu_pll *cp = hw_to_ccu_pll(&plat_data->th1520_pll_clks[i]->hw);
- th1520_pll_clks[i]->map = map;
+ plat_data->th1520_pll_clks[i]->map = map;
- ret = devm_clk_hw_register(dev, &th1520_pll_clks[i]->hw);
+ ret = devm_clk_hw_register(dev, &plat_data->th1520_pll_clks[i]->hw);
if (ret)
return ret;
priv->hws[cp->common.clkid] = &cp->common.hw;
}
- for (i = 0; i < ARRAY_SIZE(th1520_div_clks); i++) {
- struct ccu_div *cd = hw_to_ccu_div(&th1520_div_clks[i]->hw);
+ for (i = 0; i < plat_data->nr_div_clks; i++) {
+ struct ccu_div *cd = hw_to_ccu_div(&plat_data->th1520_div_clks[i]->hw);
- th1520_div_clks[i]->map = map;
+ plat_data->th1520_div_clks[i]->map = map;
- ret = devm_clk_hw_register(dev, &th1520_div_clks[i]->hw);
+ ret = devm_clk_hw_register(dev, &plat_data->th1520_div_clks[i]->hw);
if (ret)
return ret;
priv->hws[cd->common.clkid] = &cd->common.hw;
}
- for (i = 0; i < ARRAY_SIZE(th1520_mux_clks); i++) {
- struct ccu_mux *cm = hw_to_ccu_mux(&th1520_mux_clks[i]->hw);
+ for (i = 0; i < plat_data->nr_mux_clks; i++) {
+ struct ccu_mux *cm = hw_to_ccu_mux(&plat_data->th1520_mux_clks[i]->hw);
const struct clk_init_data *init = cm->common.hw.init;
- th1520_mux_clks[i]->map = map;
+ plat_data->th1520_mux_clks[i]->map = map;
hw = devm_clk_hw_register_mux_parent_data_table(dev,
init->name,
init->parent_data,
@@ -1040,10 +1173,10 @@ static int th1520_clk_probe(struct platform_device *pdev)
priv->hws[cm->common.clkid] = hw;
}
- for (i = 0; i < ARRAY_SIZE(th1520_gate_clks); i++) {
- struct ccu_gate *cg = hw_to_ccu_gate(&th1520_gate_clks[i]->hw);
+ for (i = 0; i < plat_data->nr_gate_clks; i++) {
+ struct ccu_gate *cg = hw_to_ccu_gate(&plat_data->th1520_gate_clks[i]->hw);
- th1520_gate_clks[i]->map = map;
+ plat_data->th1520_gate_clks[i]->map = map;
hw = devm_clk_hw_register_gate_parent_data(dev,
cg->common.hw.init->name,
@@ -1057,19 +1190,21 @@ static int th1520_clk_probe(struct platform_device *pdev)
priv->hws[cg->common.clkid] = hw;
}
- ret = devm_clk_hw_register(dev, &osc12m_clk.hw);
- if (ret)
- return ret;
- priv->hws[CLK_OSC12M] = &osc12m_clk.hw;
+ if (plat_data == &th1520_ap_platdata) {
+ ret = devm_clk_hw_register(dev, &osc12m_clk.hw);
+ if (ret)
+ return ret;
+ priv->hws[CLK_OSC12M] = &osc12m_clk.hw;
- ret = devm_clk_hw_register(dev, &gmac_pll_clk_100m.hw);
- if (ret)
- return ret;
- priv->hws[CLK_PLL_GMAC_100M] = &gmac_pll_clk_100m.hw;
+ ret = devm_clk_hw_register(dev, &gmac_pll_clk_100m.hw);
+ if (ret)
+ return ret;
+ priv->hws[CLK_PLL_GMAC_100M] = &gmac_pll_clk_100m.hw;
- ret = devm_clk_hw_register(dev, &emmc_sdio_ref_clk.hw);
- if (ret)
- return ret;
+ ret = devm_clk_hw_register(dev, &emmc_sdio_ref_clk.hw);
+ if (ret)
+ return ret;
+ }
ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, priv);
if (ret)
@@ -1081,6 +1216,11 @@ static int th1520_clk_probe(struct platform_device *pdev)
static const struct of_device_id th1520_clk_match[] = {
{
.compatible = "thead,th1520-clk-ap",
+ .data = &th1520_ap_platdata,
+ },
+ {
+ .compatible = "thead,th1520-clk-vo",
+ .data = &th1520_vo_platdata,
},
{ /* sentinel */ },
};
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index d64b07ec48e5..78702a08364f 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -217,6 +217,18 @@ config CPUFREQ_DT
If in doubt, say N.
+config CPUFREQ_DT_RUST
+	tristate "Rust-based generic DT cpufreq driver"
+ depends on HAVE_CLK && OF && RUST
+ select CPUFREQ_DT_PLATDEV
+ select PM_OPP
+ help
+	  This adds a Rust-based generic DT cpufreq driver for frequency
+	  management. It supports both uniprocessor (UP) and symmetric
+	  multiprocessor (SMP) systems.
+
+ If in doubt, say N.
+
config CPUFREQ_VIRT
tristate "Virtual cpufreq driver"
depends on GENERIC_ARCH_TOPOLOGY
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 22ab45209f9b..d38526b8e063 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o
obj-$(CONFIG_CPU_FREQ_GOV_ATTR_SET) += cpufreq_governor_attr_set.o
obj-$(CONFIG_CPUFREQ_DT) += cpufreq-dt.o
+obj-$(CONFIG_CPUFREQ_DT_RUST) += rcpufreq_dt.o
obj-$(CONFIG_CPUFREQ_DT_PLATDEV) += cpufreq-dt-platdev.o
obj-$(CONFIG_CPUFREQ_VIRT) += virtual-cpufreq.o
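For illustration only, a kernel configuration enabling the new Rust driver as a module might contain the fragment below; this is a minimal sketch and assumes Rust support is already available in the build. All symbols appear in the Kconfig hunk above.

    # Illustrative .config fragment (not part of this patch)
    CONFIG_RUST=y
    CONFIG_PM_OPP=y
    CONFIG_CPUFREQ_DT_PLATDEV=y
    CONFIG_CPUFREQ_DT_RUST=m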
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index ea4b8f220a05..4f7f9201598d 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -660,7 +660,7 @@ static u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq)
nominal_perf = perf_caps.nominal_perf;
if (nominal_freq)
- *nominal_freq = perf_caps.nominal_freq;
+ *nominal_freq = perf_caps.nominal_freq * 1000;
if (!highest_perf || !nominal_perf) {
pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
diff --git a/drivers/cpufreq/rcpufreq_dt.rs b/drivers/cpufreq/rcpufreq_dt.rs
new file mode 100644
index 000000000000..94ed81644fe1
--- /dev/null
+++ b/drivers/cpufreq/rcpufreq_dt.rs
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust based implementation of the cpufreq-dt driver.
+
+use kernel::{
+ c_str,
+ clk::Clk,
+ cpu, cpufreq,
+ cpumask::CpumaskVar,
+ device::{Core, Device},
+ error::code::*,
+ fmt,
+ macros::vtable,
+ module_platform_driver, of, opp, platform,
+ prelude::*,
+ str::CString,
+ sync::Arc,
+};
+
+/// Finds the exact supply name from the OF node.
+fn find_supply_name_exact(dev: &Device, name: &str) -> Option<CString> {
+ let prop_name = CString::try_from_fmt(fmt!("{}-supply", name)).ok()?;
+ dev.property_present(&prop_name)
+ .then(|| CString::try_from_fmt(fmt!("{name}")).ok())
+ .flatten()
+}
+
+/// Finds the supply name for the CPU from DT.
+fn find_supply_names(dev: &Device, cpu: u32) -> Option<KVec<CString>> {
+    // Try "cpu0" for older DTs, fall back to "cpu".
+ let name = (cpu == 0)
+ .then(|| find_supply_name_exact(dev, "cpu0"))
+ .flatten()
+ .or_else(|| find_supply_name_exact(dev, "cpu"))?;
+
+ let mut list = KVec::with_capacity(1, GFP_KERNEL).ok()?;
+ list.push(name, GFP_KERNEL).ok()?;
+
+ Some(list)
+}
+
+/// Represents the cpufreq dt device.
+struct CPUFreqDTDevice {
+ opp_table: opp::Table,
+ freq_table: opp::FreqTable,
+ _mask: CpumaskVar,
+ _token: Option<opp::ConfigToken>,
+ _clk: Clk,
+}
+
+#[derive(Default)]
+struct CPUFreqDTDriver;
+
+#[vtable]
+impl opp::ConfigOps for CPUFreqDTDriver {}
+
+#[vtable]
+impl cpufreq::Driver for CPUFreqDTDriver {
+ const NAME: &'static CStr = c_str!("cpufreq-dt");
+ const FLAGS: u16 = cpufreq::flags::NEED_INITIAL_FREQ_CHECK | cpufreq::flags::IS_COOLING_DEV;
+ const BOOST_ENABLED: bool = true;
+
+ type PData = Arc<CPUFreqDTDevice>;
+
+ fn init(policy: &mut cpufreq::Policy) -> Result<Self::PData> {
+ let cpu = policy.cpu();
+ // SAFETY: The CPU device is only used during init; it won't get hot-unplugged. The cpufreq
+        // core registers with CPU notifiers and the cpufreq core/driver won't use the CPU device
+        // once the CPU is hot-unplugged.
+ let dev = unsafe { cpu::from_cpu(cpu)? };
+ let mut mask = CpumaskVar::new_zero(GFP_KERNEL)?;
+
+ mask.set(cpu);
+
+ let token = find_supply_names(dev, cpu)
+ .map(|names| {
+ opp::Config::<Self>::new()
+ .set_regulator_names(names)?
+ .set(dev)
+ })
+ .transpose()?;
+
+ // Get OPP-sharing information from "operating-points-v2" bindings.
+ let fallback = match opp::Table::of_sharing_cpus(dev, &mut mask) {
+ Ok(()) => false,
+ Err(e) if e == ENOENT => {
+                // "operating-points-v2" not supported. If the platform hasn't
+                // set sharing CPUs, fall back to all CPUs sharing the `Policy`
+                // for backward compatibility.
+ opp::Table::sharing_cpus(dev, &mut mask).is_err()
+ }
+ Err(e) => return Err(e),
+ };
+
+ // Initialize OPP tables for all policy cpus.
+ //
+ // For platforms not using "operating-points-v2" bindings, we do this
+ // before updating policy cpus. Otherwise, we will end up creating
+ // duplicate OPPs for the CPUs.
+ //
+        // OPPs might be populated at runtime, so don't fail on errors here unless
+        // the error is -EPROBE_DEFER.
+ let mut opp_table = match opp::Table::from_of_cpumask(dev, &mut mask) {
+ Ok(table) => table,
+ Err(e) => {
+ if e == EPROBE_DEFER {
+ return Err(e);
+ }
+
+                // The OPP table might have been added dynamically.
+ opp::Table::from_dev(dev)?
+ }
+ };
+
+ // The OPP table must be initialized, statically or dynamically, by this point.
+ opp_table.opp_count()?;
+
+ // Set sharing cpus for fallback scenario.
+ if fallback {
+ mask.setall();
+ opp_table.set_sharing_cpus(&mut mask)?;
+ }
+
+ let mut transition_latency = opp_table.max_transition_latency_ns() as u32;
+ if transition_latency == 0 {
+ transition_latency = cpufreq::ETERNAL_LATENCY_NS;
+ }
+
+ policy
+ .set_dvfs_possible_from_any_cpu(true)
+ .set_suspend_freq(opp_table.suspend_freq())
+ .set_transition_latency_ns(transition_latency);
+
+ let freq_table = opp_table.cpufreq_table()?;
+        // SAFETY: The `freq_table` is not dropped while it is being used by the C code.
+ unsafe { policy.set_freq_table(&freq_table) };
+
+        // SAFETY: The returned `clk` is not dropped while it is being used by the C code.
+ let clk = unsafe { policy.set_clk(dev, None)? };
+
+ mask.copy(policy.cpus());
+
+ Ok(Arc::new(
+ CPUFreqDTDevice {
+ opp_table,
+ freq_table,
+ _mask: mask,
+ _token: token,
+ _clk: clk,
+ },
+ GFP_KERNEL,
+ )?)
+ }
+
+ fn exit(_policy: &mut cpufreq::Policy, _data: Option<Self::PData>) -> Result {
+ Ok(())
+ }
+
+ fn online(_policy: &mut cpufreq::Policy) -> Result {
+        // We did a light-weight teardown earlier; nothing to do here.
+ Ok(())
+ }
+
+ fn offline(_policy: &mut cpufreq::Policy) -> Result {
+        // Preserve policy->data and don't free resources on light-weight
+        // teardown.
+ Ok(())
+ }
+
+ fn suspend(policy: &mut cpufreq::Policy) -> Result {
+ policy.generic_suspend()
+ }
+
+ fn verify(data: &mut cpufreq::PolicyData) -> Result {
+ data.generic_verify()
+ }
+
+ fn target_index(policy: &mut cpufreq::Policy, index: cpufreq::TableIndex) -> Result {
+ let Some(data) = policy.data::<Self::PData>() else {
+ return Err(ENOENT);
+ };
+
+ let freq = data.freq_table.freq(index)?;
+ data.opp_table.set_rate(freq)
+ }
+
+ fn get(policy: &mut cpufreq::Policy) -> Result<u32> {
+ policy.generic_get()
+ }
+
+ fn set_boost(_policy: &mut cpufreq::Policy, _state: i32) -> Result {
+ Ok(())
+ }
+
+ fn register_em(policy: &mut cpufreq::Policy) {
+ policy.register_em_opp()
+ }
+}
+
+kernel::of_device_table!(
+ OF_TABLE,
+ MODULE_OF_TABLE,
+ <CPUFreqDTDriver as platform::Driver>::IdInfo,
+ [(of::DeviceId::new(c_str!("operating-points-v2")), ())]
+);
+
+impl platform::Driver for CPUFreqDTDriver {
+ type IdInfo = ();
+ const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = Some(&OF_TABLE);
+
+ fn probe(
+ pdev: &platform::Device<Core>,
+ _id_info: Option<&Self::IdInfo>,
+ ) -> Result<Pin<KBox<Self>>> {
+ cpufreq::Registration::<CPUFreqDTDriver>::new_foreign_owned(pdev.as_ref())?;
+ Ok(KBox::new(Self {}, GFP_KERNEL)?.into())
+ }
+}
+
+module_platform_driver! {
+ type: CPUFreqDTDriver,
+ name: "cpufreq-dt",
+ author: "Viresh Kumar <viresh.kumar@linaro.org>",
+ description: "Generic CPUFreq DT driver",
+ license: "GPL v2",
+}
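For illustration, a device-tree sketch of what find_supply_names() above looks for: the CPU node carries a "<name>-supply" regulator reference, "cpu0-supply" on older DTs and "cpu-supply" otherwise. The node name, compatible and labels below are illustrative, not taken from this patch.

    cpu0: cpu@0 {
            compatible = "arm,cortex-a53";
            operating-points-v2 = <&cpu_opp_table>;
            cpu-supply = <&vdd_cpu>;        /* or "cpu0-supply" on older DTs */
    };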
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 944e899eb1be..ef078426bfd5 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -393,6 +393,40 @@ static struct cpufreq_driver scmi_cpufreq_driver = {
.set_boost = cpufreq_boost_set_sw,
};
+static bool scmi_dev_used_by_cpus(struct device *scmi_dev)
+{
+ struct device_node *scmi_np = dev_of_node(scmi_dev);
+ struct device_node *cpu_np, *np;
+ struct device *cpu_dev;
+ int cpu, idx;
+
+ if (!scmi_np)
+ return false;
+
+ for_each_possible_cpu(cpu) {
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev)
+ continue;
+
+ cpu_np = dev_of_node(cpu_dev);
+
+ np = of_parse_phandle(cpu_np, "clocks", 0);
+ of_node_put(np);
+
+ if (np == scmi_np)
+ return true;
+
+ idx = of_property_match_string(cpu_np, "power-domain-names", "perf");
+ np = of_parse_phandle(cpu_np, "power-domains", idx);
+ of_node_put(np);
+
+ if (np == scmi_np)
+ return true;
+ }
+
+ return false;
+}
+
static int scmi_cpufreq_probe(struct scmi_device *sdev)
{
int ret;
@@ -401,7 +435,7 @@ static int scmi_cpufreq_probe(struct scmi_device *sdev)
handle = sdev->handle;
- if (!handle)
+ if (!handle || !scmi_dev_used_by_cpus(dev))
return -ENODEV;
scmi_cpufreq_driver.driver_data = sdev;
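As a hedged device-tree sketch of the two references scmi_dev_used_by_cpus() accepts (labels and unit addresses are illustrative): either the CPU's "clocks" phandle or its "perf" power domain must point back at the SCMI performance protocol node for the driver to bind.

    cpu0: cpu@0 {
            /* clock-based DVFS */
            clocks = <&scmi_dvfs 0>;
    };

    cpu1: cpu@100 {
            /* or performance-domain based DVFS */
            power-domains = <&scmi_dvfs 1>;
            power-domain-names = "perf";
    };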
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index 3c2756a539c4..4e1ba35deda9 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -456,14 +456,13 @@ static struct faux_device_ops psci_cpuidle_ops = {
static bool __init dt_idle_state_present(void)
{
- struct device_node *cpu_node __free(device_node);
- struct device_node *state_node __free(device_node);
-
- cpu_node = of_cpu_device_node_get(cpumask_first(cpu_possible_mask));
+ struct device_node *cpu_node __free(device_node) =
+ of_cpu_device_node_get(cpumask_first(cpu_possible_mask));
if (!cpu_node)
return false;
- state_node = of_get_cpu_state_node(cpu_node, 0);
+ struct device_node *state_node __free(device_node) =
+ of_get_cpu_state_node(cpu_node, 0);
if (!state_node)
return false;
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 760b7d81fcd8..80355d03004d 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -702,27 +702,6 @@ static void idxd_disable_system_pasid(struct idxd_device *idxd)
idxd->pasid = IOMMU_PASID_INVALID;
}
-static int idxd_enable_sva(struct pci_dev *pdev)
-{
- int ret;
-
- ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
- if (ret)
- return ret;
-
- ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
- if (ret)
- iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
-
- return ret;
-}
-
-static void idxd_disable_sva(struct pci_dev *pdev)
-{
- iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
- iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
-}
-
static int idxd_probe(struct idxd_device *idxd)
{
struct pci_dev *pdev = idxd->pdev;
@@ -737,17 +716,13 @@ static int idxd_probe(struct idxd_device *idxd)
dev_dbg(dev, "IDXD reset complete\n");
if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
- if (idxd_enable_sva(pdev)) {
- dev_warn(dev, "Unable to turn on user SVA feature.\n");
- } else {
- set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
+ set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
- rc = idxd_enable_system_pasid(idxd);
- if (rc)
- dev_warn(dev, "No in-kernel DMA with PASID. %d\n", rc);
- else
- set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
- }
+ rc = idxd_enable_system_pasid(idxd);
+ if (rc)
+ dev_warn(dev, "No in-kernel DMA with PASID. %d\n", rc);
+ else
+ set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
} else if (!sva) {
dev_warn(dev, "User forced SVA off via module param.\n");
}
@@ -785,8 +760,6 @@ static int idxd_probe(struct idxd_device *idxd)
err:
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
- if (device_user_pasid_enabled(idxd))
- idxd_disable_sva(pdev);
return rc;
}
@@ -797,8 +770,6 @@ static void idxd_cleanup(struct idxd_device *idxd)
idxd_cleanup_internals(idxd);
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
- if (device_user_pasid_enabled(idxd))
- idxd_disable_sva(idxd->pdev);
}
/*
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 5fe61b9ab5f9..db8c5c03d3a2 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -281,6 +281,30 @@ config EFI_EMBEDDED_FIRMWARE
bool
select CRYPTO_LIB_SHA256
+config EFI_SBAT
+ def_bool y if EFI_SBAT_FILE!=""
+
+config EFI_SBAT_FILE
+ string "Embedded SBAT section file path"
+ depends on EFI_ZBOOT
+ help
+	  The SBAT section provides a way to improve SecureBoot revocations of
+	  UEFI binaries by introducing a generation-based mechanism. With SBAT,
+	  older UEFI binaries can be prevented from booting by bumping the
+	  minimum required generation for a specific component in the bootloader.
+
+	  Note: SBAT information is distribution-specific, i.e. the owner of the
+	  signing SecureBoot certificate must define the SBAT policy. The upstream
+	  Linux kernel does not define SBAT components and their generations.
+
+	  See https://github.com/rhboot/shim/blob/main/SBAT.md for additional
+	  details.
+
+	  Specify a file with SBAT data which will be embedded as the '.sbat'
+	  section in the kernel.
+
+ If unsure, leave blank.
+
endmenu
config UEFI_CPER
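For illustration, the file pointed to by CONFIG_EFI_SBAT_FILE is a small CSV in the format defined by the shim project's SBAT.md (linked in the help text above). The component name, generation and URL below are made up; real values are defined by the distribution signing the binary.

    sbat,1,SBAT Version,sbat,1,https://github.com/rhboot/shim/blob/main/SBAT.md
    linux,1,Example Distro,linux,6.16-example,https://example.org/kernel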
diff --git a/drivers/firmware/efi/libstub/Makefile.zboot b/drivers/firmware/efi/libstub/Makefile.zboot
index 48842b5c106b..92e3c73502ba 100644
--- a/drivers/firmware/efi/libstub/Makefile.zboot
+++ b/drivers/firmware/efi/libstub/Makefile.zboot
@@ -44,6 +44,10 @@ AFLAGS_zboot-header.o += -DMACHINE_TYPE=IMAGE_FILE_MACHINE_$(EFI_ZBOOT_MACH_TYPE
$(obj)/zboot-header.o: $(srctree)/drivers/firmware/efi/libstub/zboot-header.S FORCE
$(call if_changed_rule,as_o_S)
+ifneq ($(CONFIG_EFI_SBAT_FILE),)
+$(obj)/zboot-header.o: $(CONFIG_EFI_SBAT_FILE)
+endif
+
ZBOOT_DEPS := $(obj)/zboot-header.o $(objtree)/drivers/firmware/efi/libstub/lib.a
LDFLAGS_vmlinuz.efi.elf := -T $(srctree)/drivers/firmware/efi/libstub/zboot.lds
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index fd6dc790c5a8..7aa2f9ad2935 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -601,6 +601,7 @@ efi_status_t efi_load_initrd_cmdline(efi_loaded_image_t *image,
* @image: EFI loaded image protocol
* @soft_limit: preferred address for loading the initrd
* @hard_limit: upper limit address for loading the initrd
+ * @out: pointer to store the address of the initrd table
*
* Return: status code
*/
diff --git a/drivers/firmware/efi/libstub/zboot-header.S b/drivers/firmware/efi/libstub/zboot-header.S
index fb676ded47fa..b6431edd0fc9 100644
--- a/drivers/firmware/efi/libstub/zboot-header.S
+++ b/drivers/firmware/efi/libstub/zboot-header.S
@@ -4,17 +4,17 @@
#ifdef CONFIG_64BIT
.set .Lextra_characteristics, 0x0
- .set .Lpe_opt_magic, PE_OPT_MAGIC_PE32PLUS
+ .set .Lpe_opt_magic, IMAGE_NT_OPTIONAL_HDR64_MAGIC
#else
.set .Lextra_characteristics, IMAGE_FILE_32BIT_MACHINE
- .set .Lpe_opt_magic, PE_OPT_MAGIC_PE32
+ .set .Lpe_opt_magic, IMAGE_NT_OPTIONAL_HDR32_MAGIC
#endif
.section ".head", "a"
.globl __efistub_efi_zboot_header
__efistub_efi_zboot_header:
.Ldoshdr:
- .long MZ_MAGIC
+ .long IMAGE_DOS_SIGNATURE
.ascii "zimg" // image type
.long __efistub__gzdata_start - .Ldoshdr // payload offset
.long __efistub__gzdata_size - ZBOOT_SIZE_LEN // payload size
@@ -25,7 +25,7 @@ __efistub_efi_zboot_header:
.long .Lpehdr - .Ldoshdr // PE header offset
.Lpehdr:
- .long PE_MAGIC
+ .long IMAGE_NT_SIGNATURE
.short MACHINE_TYPE
.short .Lsection_count
.long 0
@@ -63,7 +63,7 @@ __efistub_efi_zboot_header:
.long .Lefi_header_end - .Ldoshdr
.long 0
.short IMAGE_SUBSYSTEM_EFI_APPLICATION
- .short IMAGE_DLL_CHARACTERISTICS_NX_COMPAT
+ .short IMAGE_DLLCHARACTERISTICS_NX_COMPAT
#ifdef CONFIG_64BIT
.quad 0, 0, 0, 0
#else
@@ -123,11 +123,29 @@ __efistub_efi_zboot_header:
IMAGE_SCN_MEM_READ | \
IMAGE_SCN_MEM_EXECUTE
+#ifdef CONFIG_EFI_SBAT
+ .ascii ".sbat\0\0\0"
+ .long __sbat_size
+ .long _sbat - .Ldoshdr
+ .long __sbat_size
+ .long _sbat - .Ldoshdr
+
+ .long 0, 0
+ .short 0, 0
+ .long IMAGE_SCN_CNT_INITIALIZED_DATA | \
+ IMAGE_SCN_MEM_READ | \
+ IMAGE_SCN_MEM_DISCARDABLE
+
+ .pushsection ".sbat", "a", @progbits
+ .incbin CONFIG_EFI_SBAT_FILE
+ .popsection
+#endif
+
.ascii ".data\0\0\0"
.long __data_size
- .long _etext - .Ldoshdr
+ .long _data - .Ldoshdr
.long __data_rawsize
- .long _etext - .Ldoshdr
+ .long _data - .Ldoshdr
.long 0, 0
.short 0, 0
diff --git a/drivers/firmware/efi/libstub/zboot.lds b/drivers/firmware/efi/libstub/zboot.lds
index 9ecc57ff5b45..c3a166675450 100644
--- a/drivers/firmware/efi/libstub/zboot.lds
+++ b/drivers/firmware/efi/libstub/zboot.lds
@@ -29,7 +29,17 @@ SECTIONS
. = _etext;
}
+#ifdef CONFIG_EFI_SBAT
+ .sbat : ALIGN(4096) {
+ _sbat = .;
+ *(.sbat)
+ _esbat = ALIGN(4096);
+ . = _esbat;
+ }
+#endif
+
.data : ALIGN(4096) {
+ _data = .;
*(.data* .init.data*)
_edata = ALIGN(512);
. = _edata;
@@ -52,3 +62,4 @@ PROVIDE(__efistub__gzdata_size =
PROVIDE(__data_rawsize = ABSOLUTE(_edata - _etext));
PROVIDE(__data_size = ABSOLUTE(_end - _etext));
+PROVIDE(__sbat_size = ABSOLUTE(_esbat - _sbat));
diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
index 34109fd86c55..f1c04d7cfd71 100644
--- a/drivers/firmware/efi/memmap.c
+++ b/drivers/firmware/efi/memmap.c
@@ -43,7 +43,8 @@ int __init __efi_memmap_init(struct efi_memory_map_data *data)
map.map = early_memremap(phys_map, data->size);
if (!map.map) {
- pr_err("Could not map the memory map!\n");
+ pr_err("Could not map the memory map! phys_map=%pa, size=0x%lx\n",
+ &phys_map, data->size);
return -ENOMEM;
}
diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c
index 9e2628728aad..77b5f7ac3e20 100644
--- a/drivers/firmware/efi/test/efi_test.c
+++ b/drivers/firmware/efi/test/efi_test.c
@@ -361,6 +361,10 @@ static long efi_runtime_get_waketime(unsigned long arg)
getwakeuptime.enabled))
return -EFAULT;
+ if (getwakeuptime.pending && put_user(pending,
+ getwakeuptime.pending))
+ return -EFAULT;
+
if (getwakeuptime.time) {
if (copy_to_user(getwakeuptime.time, &efi_time,
sizeof(efi_time_t)))
diff --git a/drivers/i2c/algos/i2c-algo-pcf.c b/drivers/i2c/algos/i2c-algo-pcf.c
index 7a01f2687b4c..740066ceaea3 100644
--- a/drivers/i2c/algos/i2c-algo-pcf.c
+++ b/drivers/i2c/algos/i2c-algo-pcf.c
@@ -19,6 +19,7 @@
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-pcf.h>
+#include <linux/string_choices.h>
#include "i2c-algo-pcf.h"
@@ -316,7 +317,7 @@ static int pcf_xfer(struct i2c_adapter *i2c_adap,
pmsg = &msgs[i];
DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: Doing %s %d bytes to 0x%02x - %d of %d messages\n",
- pmsg->flags & I2C_M_RD ? "read" : "write",
+ str_read_write(pmsg->flags & I2C_M_RD),
pmsg->len, pmsg->addr, i + 1, num);)
ret = pcf_doAddress(adap, pmsg);
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index bbbd6240fa6e..48c5ab832009 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -592,6 +592,17 @@ config I2C_DESIGNWARE_PLATFORM
This driver can also be built as a module. If so, the module
will be called i2c-designware-platform.
+config I2C_DESIGNWARE_AMDISP
+ tristate "Synopsys DesignWare Platform for AMDISP"
+ depends on DRM_AMD_ISP || COMPILE_TEST
+ depends on I2C_DESIGNWARE_CORE
+ help
+ If you say yes to this option, support will be included for the
+ AMDISP Synopsys DesignWare I2C adapter.
+
+ This driver can also be built as a module. If so, the module
+ will be called amd_isp_i2c_designware.
+
config I2C_DESIGNWARE_AMDPSP
bool "AMD PSP I2C semaphore support"
depends on ACPI
@@ -845,7 +856,7 @@ config I2C_LS2X
config I2C_MLXBF
tristate "Mellanox BlueField I2C controller"
- depends on MELLANOX_PLATFORM && ARM64
+ depends on (MELLANOX_PLATFORM && ARM64) || COMPILE_TEST
depends on ACPI
select I2C_SLAVE
help
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index c1252e2b779e..04db855fdfd6 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -58,6 +58,7 @@ obj-$(CONFIG_I2C_DESIGNWARE_PLATFORM) += i2c-designware-platform.o
i2c-designware-platform-y := i2c-designware-platdrv.o
i2c-designware-platform-$(CONFIG_I2C_DESIGNWARE_AMDPSP) += i2c-designware-amdpsp.o
i2c-designware-platform-$(CONFIG_I2C_DESIGNWARE_BAYTRAIL) += i2c-designware-baytrail.o
+obj-$(CONFIG_I2C_DESIGNWARE_AMDISP) += i2c-designware-amdisp.o
obj-$(CONFIG_I2C_DESIGNWARE_PCI) += i2c-designware-pci.o
i2c-designware-pci-y := i2c-designware-pcidrv.o
obj-$(CONFIG_I2C_DIGICOLOR) += i2c-digicolor.o
diff --git a/drivers/i2c/busses/i2c-at91-master.c b/drivers/i2c/busses/i2c-at91-master.c
index ee3b469ddfb9..374fc50bb205 100644
--- a/drivers/i2c/busses/i2c-at91-master.c
+++ b/drivers/i2c/busses/i2c-at91-master.c
@@ -26,6 +26,7 @@
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/string_choices.h>
#include "i2c-at91.h"
@@ -523,7 +524,7 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
*/
dev_dbg(dev->dev, "transfer: %s %zu bytes.\n",
- (dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);
+ str_read_write(dev->msg->flags & I2C_M_RD), dev->buf_len);
reinit_completion(&dev->cmd_complete);
dev->transfer_status = 0;
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index 332a0fcca28d..63bc3c8f49d3 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -224,11 +224,6 @@ static void slave_rx_tasklet_fn(unsigned long);
| BIT(IS_S_TX_UNDERRUN_SHIFT) | BIT(IS_S_RX_FIFO_FULL_SHIFT)\
| BIT(IS_S_RX_THLD_SHIFT))
-static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave);
-static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave);
-static void bcm_iproc_i2c_enable_disable(struct bcm_iproc_i2c_dev *iproc_i2c,
- bool enable);
-
static inline u32 iproc_i2c_rd_reg(struct bcm_iproc_i2c_dev *iproc_i2c,
u32 offset)
{
@@ -264,8 +259,8 @@ static inline void iproc_i2c_wr_reg(struct bcm_iproc_i2c_dev *iproc_i2c,
}
}
-static void bcm_iproc_i2c_slave_init(
- struct bcm_iproc_i2c_dev *iproc_i2c, bool need_reset)
+static void bcm_iproc_i2c_slave_init(struct bcm_iproc_i2c_dev *iproc_i2c,
+ bool need_reset)
{
u32 val;
@@ -276,8 +271,8 @@ static void bcm_iproc_i2c_slave_init(
val |= BIT(CFG_RESET_SHIFT);
iproc_i2c_wr_reg(iproc_i2c, CFG_OFFSET, val);
- /* wait 100 usec per spec */
- udelay(100);
+ /* wait approximately 100 usec as per spec */
+ usleep_range(100, 200);
/* bring controller out of reset */
val &= ~(BIT(CFG_RESET_SHIFT));
@@ -316,6 +311,19 @@ static void bcm_iproc_i2c_slave_init(
iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
}
+static void bcm_iproc_i2c_enable_disable(struct bcm_iproc_i2c_dev *iproc_i2c,
+ bool enable)
+{
+ u32 val;
+
+ val = iproc_i2c_rd_reg(iproc_i2c, CFG_OFFSET);
+ if (enable)
+ val |= BIT(CFG_EN_SHIFT);
+ else
+ val &= ~BIT(CFG_EN_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, CFG_OFFSET, val);
+}
+
static bool bcm_iproc_i2c_check_slave_status
(struct bcm_iproc_i2c_dev *iproc_i2c, u32 status)
{
@@ -438,7 +446,6 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
u32 val;
u8 value;
-
if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) {
iproc_i2c->tx_underrun++;
if (iproc_i2c->tx_underrun == 1)
@@ -542,7 +549,7 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
static void bcm_iproc_i2c_read_valid_bytes(struct bcm_iproc_i2c_dev *iproc_i2c)
{
struct i2c_msg *msg = iproc_i2c->msg;
- uint32_t val;
+ u32 val;
/* Read valid data from RX FIFO */
while (iproc_i2c->rx_bytes < msg->len) {
@@ -688,8 +695,8 @@ static void bcm_iproc_i2c_init(struct bcm_iproc_i2c_dev *iproc_i2c)
val &= ~(BIT(CFG_EN_SHIFT));
iproc_i2c_wr_reg(iproc_i2c, CFG_OFFSET, val);
- /* wait 100 usec per spec */
- udelay(100);
+ /* wait approximately 100 usec as per spec */
+ usleep_range(100, 200);
/* bring controller out of reset */
val &= ~(BIT(CFG_RESET_SHIFT));
@@ -708,19 +715,6 @@ static void bcm_iproc_i2c_init(struct bcm_iproc_i2c_dev *iproc_i2c)
iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, 0xffffffff);
}
-static void bcm_iproc_i2c_enable_disable(struct bcm_iproc_i2c_dev *iproc_i2c,
- bool enable)
-{
- u32 val;
-
- val = iproc_i2c_rd_reg(iproc_i2c, CFG_OFFSET);
- if (enable)
- val |= BIT(CFG_EN_SHIFT);
- else
- val &= ~BIT(CFG_EN_SHIFT);
- iproc_i2c_wr_reg(iproc_i2c, CFG_OFFSET, val);
-}
-
static int bcm_iproc_i2c_check_status(struct bcm_iproc_i2c_dev *iproc_i2c,
struct i2c_msg *msg)
{
@@ -734,31 +728,31 @@ static int bcm_iproc_i2c_check_status(struct bcm_iproc_i2c_dev *iproc_i2c,
return 0;
case M_CMD_STATUS_LOST_ARB:
- dev_dbg(iproc_i2c->device, "lost bus arbitration\n");
+ dev_err(iproc_i2c->device, "lost bus arbitration\n");
return -EAGAIN;
case M_CMD_STATUS_NACK_ADDR:
- dev_dbg(iproc_i2c->device, "NAK addr:0x%02x\n", msg->addr);
+ dev_err(iproc_i2c->device, "NAK addr:0x%02x\n", msg->addr);
return -ENXIO;
case M_CMD_STATUS_NACK_DATA:
- dev_dbg(iproc_i2c->device, "NAK data\n");
+ dev_err(iproc_i2c->device, "NAK data\n");
return -ENXIO;
case M_CMD_STATUS_TIMEOUT:
- dev_dbg(iproc_i2c->device, "bus timeout\n");
+ dev_err(iproc_i2c->device, "bus timeout\n");
return -ETIMEDOUT;
case M_CMD_STATUS_FIFO_UNDERRUN:
- dev_dbg(iproc_i2c->device, "FIFO under-run\n");
+ dev_err(iproc_i2c->device, "FIFO under-run\n");
return -ENXIO;
case M_CMD_STATUS_RX_FIFO_FULL:
- dev_dbg(iproc_i2c->device, "RX FIFO full\n");
+ dev_err(iproc_i2c->device, "RX FIFO full\n");
return -ETIMEDOUT;
default:
- dev_dbg(iproc_i2c->device, "unknown error code=%d\n", val);
+ dev_err(iproc_i2c->device, "unknown error code=%d\n", val);
/* re-initialize i2c for recovery */
bcm_iproc_i2c_enable_disable(iproc_i2c, false);
@@ -833,7 +827,7 @@ static int bcm_iproc_i2c_xfer_wait(struct bcm_iproc_i2c_dev *iproc_i2c,
* The i2c quirks are set to enforce this rule.
*/
static int bcm_iproc_i2c_xfer_internal(struct bcm_iproc_i2c_dev *iproc_i2c,
- struct i2c_msg *msgs, bool process_call)
+ struct i2c_msg *msgs, bool process_call)
{
int i;
u8 addr;
@@ -842,8 +836,8 @@ static int bcm_iproc_i2c_xfer_internal(struct bcm_iproc_i2c_dev *iproc_i2c,
struct i2c_msg *msg = &msgs[0];
/* check if bus is busy */
- if (!!(iproc_i2c_rd_reg(iproc_i2c,
- M_CMD_OFFSET) & BIT(M_CMD_START_BUSY_SHIFT))) {
+ if (iproc_i2c_rd_reg(iproc_i2c,
+ M_CMD_OFFSET) & BIT(M_CMD_START_BUSY_SHIFT)) {
dev_warn(iproc_i2c->device, "bus is busy\n");
return -EBUSY;
}
@@ -970,14 +964,14 @@ static int bcm_iproc_i2c_xfer(struct i2c_adapter *adapter,
ret = bcm_iproc_i2c_xfer_internal(iproc_i2c, msgs, process_call);
if (ret) {
- dev_dbg(iproc_i2c->device, "xfer failed\n");
+ dev_err(iproc_i2c->device, "xfer failed\n");
return ret;
}
return num;
}
-static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap)
+static u32 bcm_iproc_i2c_functionality(struct i2c_adapter *adap)
{
u32 val;
@@ -989,6 +983,63 @@ static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap)
return val;
}
+static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave)
+{
+ struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(slave->adapter);
+
+ if (iproc_i2c->slave)
+ return -EBUSY;
+
+ if (slave->flags & I2C_CLIENT_TEN)
+ return -EAFNOSUPPORT;
+
+ iproc_i2c->slave = slave;
+
+ tasklet_init(&iproc_i2c->slave_rx_tasklet, slave_rx_tasklet_fn,
+ (unsigned long)iproc_i2c);
+
+ bcm_iproc_i2c_slave_init(iproc_i2c, false);
+
+ return 0;
+}
+
+static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave)
+{
+ struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(slave->adapter);
+ u32 tmp;
+
+ if (!iproc_i2c->slave)
+ return -EINVAL;
+
+ disable_irq(iproc_i2c->irq);
+
+ tasklet_kill(&iproc_i2c->slave_rx_tasklet);
+
+ /* disable all slave interrupts */
+ tmp = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
+ tmp &= ~(IE_S_ALL_INTERRUPT_MASK <<
+ IE_S_ALL_INTERRUPT_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, tmp);
+
+ /* Erase the slave address programmed */
+ tmp = iproc_i2c_rd_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET);
+ tmp &= ~BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET, tmp);
+
+ /* flush TX/RX FIFOs */
+ tmp = (BIT(S_FIFO_RX_FLUSH_SHIFT) | BIT(S_FIFO_TX_FLUSH_SHIFT));
+ iproc_i2c_wr_reg(iproc_i2c, S_FIFO_CTRL_OFFSET, tmp);
+
+ /* clear all pending slave interrupts */
+ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, ISR_MASK_SLAVE);
+
+ iproc_i2c->slave = NULL;
+
+ enable_irq(iproc_i2c->irq);
+
+ return 0;
+}
+
static struct i2c_algorithm bcm_iproc_algo = {
.master_xfer = bcm_iproc_i2c_xfer,
.functionality = bcm_iproc_i2c_functionality,
@@ -1010,21 +1061,18 @@ static int bcm_iproc_i2c_cfg_speed(struct bcm_iproc_i2c_dev *iproc_i2c)
"clock-frequency", &bus_speed);
if (ret < 0) {
dev_info(iproc_i2c->device,
- "unable to interpret clock-frequency DT property\n");
+ "unable to interpret clock-frequency DT property\n");
bus_speed = I2C_MAX_STANDARD_MODE_FREQ;
}
- if (bus_speed < I2C_MAX_STANDARD_MODE_FREQ) {
- dev_err(iproc_i2c->device, "%d Hz bus speed not supported\n",
- bus_speed);
- dev_err(iproc_i2c->device,
- "valid speeds are 100khz and 400khz\n");
- return -EINVAL;
- } else if (bus_speed < I2C_MAX_FAST_MODE_FREQ) {
+ if (bus_speed < I2C_MAX_STANDARD_MODE_FREQ)
+ return dev_err_probe(iproc_i2c->device, -EINVAL,
+ "%d Hz not supported (out of 100-400 kHz range)\n",
+ bus_speed);
+ else if (bus_speed < I2C_MAX_FAST_MODE_FREQ)
bus_speed = I2C_MAX_STANDARD_MODE_FREQ;
- } else {
+ else
bus_speed = I2C_MAX_FAST_MODE_FREQ;
- }
iproc_i2c->bus_speed = bus_speed;
val = iproc_i2c_rd_reg(iproc_i2c, TIM_CFG_OFFSET);
@@ -1039,9 +1087,9 @@ static int bcm_iproc_i2c_cfg_speed(struct bcm_iproc_i2c_dev *iproc_i2c)
static int bcm_iproc_i2c_probe(struct platform_device *pdev)
{
- int irq, ret = 0;
struct bcm_iproc_i2c_dev *iproc_i2c;
struct i2c_adapter *adap;
+ int irq, ret;
iproc_i2c = devm_kzalloc(&pdev->dev, sizeof(*iproc_i2c),
GFP_KERNEL);
@@ -1066,11 +1114,9 @@ static int bcm_iproc_i2c_probe(struct platform_device *pdev)
ret = of_property_read_u32(iproc_i2c->device->of_node,
"brcm,ape-hsls-addr-mask",
&iproc_i2c->ape_addr_mask);
- if (ret < 0) {
- dev_err(iproc_i2c->device,
- "'brcm,ape-hsls-addr-mask' missing\n");
- return -EINVAL;
- }
+ if (ret < 0)
+ return dev_err_probe(iproc_i2c->device, ret,
+ "'brcm,ape-hsls-addr-mask' missing\n");
spin_lock_init(&iproc_i2c->idm_lock);
@@ -1090,11 +1136,9 @@ static int bcm_iproc_i2c_probe(struct platform_device *pdev)
ret = devm_request_irq(iproc_i2c->device, irq,
bcm_iproc_i2c_isr, 0, pdev->name,
iproc_i2c);
- if (ret < 0) {
- dev_err(iproc_i2c->device,
- "unable to request irq %i\n", irq);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(iproc_i2c->device, ret,
+ "unable to request irq %i\n", irq);
iproc_i2c->irq = irq;
} else {
@@ -1106,9 +1150,8 @@ static int bcm_iproc_i2c_probe(struct platform_device *pdev)
adap = &iproc_i2c->adapter;
i2c_set_adapdata(adap, iproc_i2c);
- snprintf(adap->name, sizeof(adap->name),
- "Broadcom iProc (%s)",
- of_node_full_name(iproc_i2c->device->of_node));
+ snprintf(adap->name, sizeof(adap->name), "Broadcom iProc (%s)",
+ of_node_full_name(iproc_i2c->device->of_node));
adap->algo = &bcm_iproc_algo;
adap->quirks = &bcm_iproc_i2c_quirks;
adap->dev.parent = &pdev->dev;
@@ -1182,62 +1225,6 @@ static const struct dev_pm_ops bcm_iproc_i2c_pm_ops = {
.resume_early = &bcm_iproc_i2c_resume
};
-static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave)
-{
- struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(slave->adapter);
-
- if (iproc_i2c->slave)
- return -EBUSY;
-
- if (slave->flags & I2C_CLIENT_TEN)
- return -EAFNOSUPPORT;
-
- iproc_i2c->slave = slave;
-
- tasklet_init(&iproc_i2c->slave_rx_tasklet, slave_rx_tasklet_fn,
- (unsigned long)iproc_i2c);
-
- bcm_iproc_i2c_slave_init(iproc_i2c, false);
- return 0;
-}
-
-static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave)
-{
- u32 tmp;
- struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(slave->adapter);
-
- if (!iproc_i2c->slave)
- return -EINVAL;
-
- disable_irq(iproc_i2c->irq);
-
- tasklet_kill(&iproc_i2c->slave_rx_tasklet);
-
- /* disable all slave interrupts */
- tmp = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
- tmp &= ~(IE_S_ALL_INTERRUPT_MASK <<
- IE_S_ALL_INTERRUPT_SHIFT);
- iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, tmp);
-
- /* Erase the slave address programmed */
- tmp = iproc_i2c_rd_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET);
- tmp &= ~BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT);
- iproc_i2c_wr_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET, tmp);
-
- /* flush TX/RX FIFOs */
- tmp = (BIT(S_FIFO_RX_FLUSH_SHIFT) | BIT(S_FIFO_TX_FLUSH_SHIFT));
- iproc_i2c_wr_reg(iproc_i2c, S_FIFO_CTRL_OFFSET, tmp);
-
- /* clear all pending slave interrupts */
- iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, ISR_MASK_SLAVE);
-
- iproc_i2c->slave = NULL;
-
- enable_irq(iproc_i2c->irq);
-
- return 0;
-}
-
static const struct of_device_id bcm_iproc_i2c_of_match[] = {
{
.compatible = "brcm,iproc-i2c",
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 6a909d339681..6a3d4e9e07f4 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -551,7 +551,8 @@ out:
static u32 i2c_davinci_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ I2C_FUNC_PROTOCOL_MANGLING;
}
static void terminate_read(struct davinci_i2c_dev *dev)
diff --git a/drivers/i2c/busses/i2c-designware-amdisp.c b/drivers/i2c/busses/i2c-designware-amdisp.c
new file mode 100644
index 000000000000..ad6f08338124
--- /dev/null
+++ b/drivers/i2c/busses/i2c-designware-amdisp.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Based on Synopsys DesignWare I2C adapter driver.
+ *
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "i2c-designware-core.h"
+
+#define DRV_NAME "amd_isp_i2c_designware"
+#define AMD_ISP_I2C_INPUT_CLK 100 /* MHz */
+
+static void amd_isp_dw_i2c_plat_pm_cleanup(struct dw_i2c_dev *i2c_dev)
+{
+ pm_runtime_disable(i2c_dev->dev);
+
+ if (i2c_dev->shared_with_punit)
+ pm_runtime_put_noidle(i2c_dev->dev);
+}
+
+static inline u32 amd_isp_dw_i2c_get_clk_rate(struct dw_i2c_dev *i2c_dev)
+{
+ return AMD_ISP_I2C_INPUT_CLK * 1000;
+}
+
+static int amd_isp_dw_i2c_plat_probe(struct platform_device *pdev)
+{
+ struct dw_i2c_dev *isp_i2c_dev;
+ struct i2c_adapter *adap;
+ int ret;
+
+ isp_i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*isp_i2c_dev), GFP_KERNEL);
+ if (!isp_i2c_dev)
+ return -ENOMEM;
+ isp_i2c_dev->dev = &pdev->dev;
+
+ pdev->dev.init_name = DRV_NAME;
+
+ /*
+	 * Use polling mode to send/receive data, because there is
+	 * no IRQ connection from the ISP I2C controller.
+ */
+ isp_i2c_dev->flags |= ACCESS_POLLING;
+ platform_set_drvdata(pdev, isp_i2c_dev);
+
+ isp_i2c_dev->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(isp_i2c_dev->base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(isp_i2c_dev->base),
+ "failed to get IOMEM resource\n");
+
+ isp_i2c_dev->get_clk_rate_khz = amd_isp_dw_i2c_get_clk_rate;
+ ret = i2c_dw_fw_parse_and_configure(isp_i2c_dev);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to parse i2c dw fwnode and configure\n");
+
+ i2c_dw_configure(isp_i2c_dev);
+
+ adap = &isp_i2c_dev->adapter;
+ adap->owner = THIS_MODULE;
+ ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev));
+ adap->dev.of_node = pdev->dev.of_node;
+ /* use dynamically allocated adapter id */
+ adap->nr = -1;
+
+ if (isp_i2c_dev->flags & ACCESS_NO_IRQ_SUSPEND)
+ dev_pm_set_driver_flags(&pdev->dev,
+ DPM_FLAG_SMART_PREPARE);
+ else
+ dev_pm_set_driver_flags(&pdev->dev,
+ DPM_FLAG_SMART_PREPARE |
+ DPM_FLAG_SMART_SUSPEND);
+
+ device_enable_async_suspend(&pdev->dev);
+
+ if (isp_i2c_dev->shared_with_punit)
+ pm_runtime_get_noresume(&pdev->dev);
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ ret = i2c_dw_probe(isp_i2c_dev);
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "i2c_dw_probe failed\n");
+ goto error_release_rpm;
+ }
+
+ pm_runtime_put_sync(&pdev->dev);
+
+ return 0;
+
+error_release_rpm:
+ amd_isp_dw_i2c_plat_pm_cleanup(isp_i2c_dev);
+ pm_runtime_put_sync(&pdev->dev);
+ return ret;
+}
+
+static void amd_isp_dw_i2c_plat_remove(struct platform_device *pdev)
+{
+ struct dw_i2c_dev *isp_i2c_dev = platform_get_drvdata(pdev);
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ i2c_del_adapter(&isp_i2c_dev->adapter);
+
+ i2c_dw_disable(isp_i2c_dev);
+
+ pm_runtime_put_sync(&pdev->dev);
+ amd_isp_dw_i2c_plat_pm_cleanup(isp_i2c_dev);
+}
+
+static int amd_isp_dw_i2c_plat_prepare(struct device *dev)
+{
+ /*
+ * If the ACPI companion device object is present for this device, it
+ * may be accessed during suspend and resume of other devices via I2C
+ * operation regions, so tell the PM core and middle layers to avoid
+ * skipping system suspend/resume callbacks for it in that case.
+ */
+ return !has_acpi_companion(dev);
+}
+
+static int amd_isp_dw_i2c_plat_runtime_suspend(struct device *dev)
+{
+ struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
+
+ if (i_dev->shared_with_punit)
+ return 0;
+
+ i2c_dw_disable(i_dev);
+ i2c_dw_prepare_clk(i_dev, false);
+
+ return 0;
+}
+
+static int amd_isp_dw_i2c_plat_suspend(struct device *dev)
+{
+ struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
+ int ret;
+
+ if (!i_dev)
+ return -ENODEV;
+
+ ret = amd_isp_dw_i2c_plat_runtime_suspend(dev);
+ if (!ret)
+ i2c_mark_adapter_suspended(&i_dev->adapter);
+
+ return ret;
+}
+
+static int amd_isp_dw_i2c_plat_runtime_resume(struct device *dev)
+{
+ struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
+
+ if (!i_dev)
+ return -ENODEV;
+
+ if (!i_dev->shared_with_punit)
+ i2c_dw_prepare_clk(i_dev, true);
+ if (i_dev->init)
+ i_dev->init(i_dev);
+
+ return 0;
+}
+
+static int amd_isp_dw_i2c_plat_resume(struct device *dev)
+{
+ struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
+
+ amd_isp_dw_i2c_plat_runtime_resume(dev);
+ i2c_mark_adapter_resumed(&i_dev->adapter);
+
+ return 0;
+}
+
+static const struct dev_pm_ops amd_isp_dw_i2c_dev_pm_ops = {
+ .prepare = pm_sleep_ptr(amd_isp_dw_i2c_plat_prepare),
+ LATE_SYSTEM_SLEEP_PM_OPS(amd_isp_dw_i2c_plat_suspend, amd_isp_dw_i2c_plat_resume)
+ RUNTIME_PM_OPS(amd_isp_dw_i2c_plat_runtime_suspend, amd_isp_dw_i2c_plat_runtime_resume, NULL)
+};
+
+/* Work with hotplug and coldplug */
+MODULE_ALIAS("platform:amd_isp_i2c_designware");
+
+static struct platform_driver amd_isp_dw_i2c_driver = {
+ .probe = amd_isp_dw_i2c_plat_probe,
+ .remove = amd_isp_dw_i2c_plat_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .pm = pm_ptr(&amd_isp_dw_i2c_dev_pm_ops),
+ },
+};
+module_platform_driver(amd_isp_dw_i2c_driver);
+
+MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter in AMD ISP");
+MODULE_IMPORT_NS("I2C_DW");
+MODULE_IMPORT_NS("I2C_DW_COMMON");
+MODULE_AUTHOR("Venkata Narendra Kumar Gutta <vengutta@amd.com>");
+MODULE_AUTHOR("Pratap Nirujogi <pratap.nirujogi@amd.com>");
+MODULE_AUTHOR("Bin Du <bin.du@amd.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
index 8eb7bd640f8d..5b1e8f74c4ac 100644
--- a/drivers/i2c/busses/i2c-designware-common.c
+++ b/drivers/i2c/busses/i2c-designware-common.c
@@ -572,8 +572,10 @@ u32 i2c_dw_clk_rate(struct dw_i2c_dev *dev)
* Clock is not necessary if we got LCNT/HCNT values directly from
* the platform code.
*/
- if (WARN_ON_ONCE(!dev->get_clk_rate_khz))
+ if (!dev->get_clk_rate_khz) {
+ dev_dbg_once(dev->dev, "Callback get_clk_rate_khz() is not defined\n");
return 0;
+ }
return dev->get_clk_rate_khz(dev);
}
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index d6e1ee935399..879719e91df2 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -34,7 +34,7 @@
static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
{
- return clk_get_rate(dev->clk) / KILO;
+ return clk_get_rate(dev->clk) / HZ_PER_KHZ;
}
#ifdef CONFIG_OF
diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
index 5cd4a5f7a472..b936a240db0a 100644
--- a/drivers/i2c/busses/i2c-designware-slave.c
+++ b/drivers/i2c/busses/i2c-designware-slave.c
@@ -96,7 +96,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave)
i2c_dw_disable(dev);
synchronize_irq(dev->irq);
dev->slave = NULL;
- pm_runtime_put(dev->dev);
+ pm_runtime_put_sync_suspend(dev->dev);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 48e1af544b75..a7f89946dad4 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1180,7 +1180,7 @@ static void i801_probe_optional_targets(struct i801_priv *priv)
#ifdef CONFIG_I2C_I801_MUX
if (!priv->mux_pdev)
#endif
- i2c_register_spd(&priv->adapter);
+ i2c_register_spd_write_enable(&priv->adapter);
}
#else
static void __init input_apanel_init(void) {}
@@ -1283,7 +1283,7 @@ static int i801_notifier_call(struct notifier_block *nb, unsigned long action,
return NOTIFY_DONE;
/* Call i2c_register_spd for muxed child segments */
- i2c_register_spd(to_i2c_adapter(dev));
+ i2c_register_spd_write_enable(to_i2c_adapter(dev));
return NOTIFY_OK;
}
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 9e5d454d8318..de01dfecb16e 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1711,11 +1711,11 @@ static int i2c_imx_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- return irq;
+ return dev_err_probe(&pdev->dev, irq, "can't get IRQ\n");
base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
- return PTR_ERR(base);
+ return dev_err_probe(&pdev->dev, PTR_ERR(base), "can't get IO memory\n");
phy_addr = (dma_addr_t)res->start;
i2c_imx = devm_kzalloc(&pdev->dev, sizeof(*i2c_imx), GFP_KERNEL);
@@ -1810,13 +1810,15 @@ static int i2c_imx_probe(struct platform_device *pdev)
*/
ret = i2c_imx_dma_request(i2c_imx, phy_addr);
if (ret) {
- if (ret == -EPROBE_DEFER)
+ if (ret == -EPROBE_DEFER) {
+ dev_err_probe(&pdev->dev, ret, "can't get DMA channels\n");
goto clk_notifier_unregister;
- else if (ret == -ENODEV)
+ } else if (ret == -ENODEV) {
dev_dbg(&pdev->dev, "Only use PIO mode\n");
- else
+ } else {
dev_warn(&pdev->dev, "Failed to setup DMA (%pe), only use PIO mode\n",
ERR_PTR(ret));
+ }
}
/* Add I2C adapter */
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index c93c02aa6ac8..7aaefb21416a 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -933,7 +933,7 @@ ismt_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
- err = pci_request_region(pdev, SMBBAR, ismt_driver.name);
+ err = pcim_request_region(pdev, SMBBAR, ismt_driver.name);
if (err) {
dev_err(&pdev->dev,
"Failed to request SMBus region 0x%lx-0x%lx\n",
diff --git a/drivers/i2c/busses/i2c-lpc2k.c b/drivers/i2c/busses/i2c-lpc2k.c
index 6943a0de860a..ccd13c4fb83e 100644
--- a/drivers/i2c/busses/i2c-lpc2k.c
+++ b/drivers/i2c/busses/i2c-lpc2k.c
@@ -442,8 +442,13 @@ static int i2c_lpc2k_suspend(struct device *dev)
static int i2c_lpc2k_resume(struct device *dev)
{
struct lpc2k_i2c *i2c = dev_get_drvdata(dev);
+ int ret;
- clk_enable(i2c->clk);
+ ret = clk_enable(i2c->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clock.\n");
+ return ret;
+ }
i2c_lpc2k_reset(i2c);
return 0;
diff --git a/drivers/i2c/busses/i2c-microchip-corei2c.c b/drivers/i2c/busses/i2c-microchip-corei2c.c
index 5db73429125c..492bf4c34722 100644
--- a/drivers/i2c/busses/i2c-microchip-corei2c.c
+++ b/drivers/i2c/busses/i2c-microchip-corei2c.c
@@ -76,6 +76,8 @@
#define CORE_I2C_FREQ (0x14)
#define CORE_I2C_GLITCHREG (0x18)
#define CORE_I2C_SLAVE1_ADDR (0x1c)
+#define CORE_I2C_SMBUS_MSG_WR (0x0)
+#define CORE_I2C_SMBUS_MSG_RD (0x1)
#define PCLK_DIV_960 (CTRL_CR2)
#define PCLK_DIV_256 (0)
@@ -424,9 +426,109 @@ static u32 mchp_corei2c_func(struct i2c_adapter *adap)
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
+static int mchp_corei2c_smbus_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags,
+ char read_write, u8 command,
+ int size, union i2c_smbus_data *data)
+{
+ struct i2c_msg msgs[2];
+ struct mchp_corei2c_dev *idev = i2c_get_adapdata(adap);
+ u8 tx_buf[I2C_SMBUS_BLOCK_MAX + 2];
+ u8 rx_buf[I2C_SMBUS_BLOCK_MAX + 1];
+ int num_msgs = 1;
+
+ msgs[CORE_I2C_SMBUS_MSG_WR].addr = addr;
+ msgs[CORE_I2C_SMBUS_MSG_WR].flags = 0;
+
+ if (read_write == I2C_SMBUS_READ && size <= I2C_SMBUS_BYTE)
+ msgs[CORE_I2C_SMBUS_MSG_WR].flags = I2C_M_RD;
+
+ if (read_write == I2C_SMBUS_WRITE && size <= I2C_SMBUS_WORD_DATA)
+ msgs[CORE_I2C_SMBUS_MSG_WR].len = size;
+
+ if (read_write == I2C_SMBUS_WRITE && size > I2C_SMBUS_BYTE) {
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf = tx_buf;
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf[0] = command;
+ }
+
+ if (read_write == I2C_SMBUS_READ && size >= I2C_SMBUS_BYTE_DATA) {
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf = tx_buf;
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf[0] = command;
+ msgs[CORE_I2C_SMBUS_MSG_RD].addr = addr;
+ msgs[CORE_I2C_SMBUS_MSG_RD].flags = I2C_M_RD;
+ num_msgs = 2;
+ }
+
+ if (read_write == I2C_SMBUS_READ && size > I2C_SMBUS_QUICK)
+ msgs[CORE_I2C_SMBUS_MSG_WR].len = 1;
+
+ switch (size) {
+ case I2C_SMBUS_QUICK:
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf = NULL;
+ return 0;
+ case I2C_SMBUS_BYTE:
+ if (read_write == I2C_SMBUS_WRITE)
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf = &command;
+ else
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf = &data->byte;
+ break;
+ case I2C_SMBUS_BYTE_DATA:
+ if (read_write == I2C_SMBUS_WRITE) {
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf[1] = data->byte;
+ } else {
+ msgs[CORE_I2C_SMBUS_MSG_RD].len = size - 1;
+ msgs[CORE_I2C_SMBUS_MSG_RD].buf = &data->byte;
+ }
+ break;
+ case I2C_SMBUS_WORD_DATA:
+ if (read_write == I2C_SMBUS_WRITE) {
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf[1] = data->word & 0xFF;
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf[2] = (data->word >> 8) & 0xFF;
+ } else {
+ msgs[CORE_I2C_SMBUS_MSG_RD].len = size - 1;
+ msgs[CORE_I2C_SMBUS_MSG_RD].buf = rx_buf;
+ }
+ break;
+ case I2C_SMBUS_BLOCK_DATA:
+ if (read_write == I2C_SMBUS_WRITE) {
+ int data_len;
+
+ data_len = data->block[0];
+ msgs[CORE_I2C_SMBUS_MSG_WR].len = data_len + 2;
+ for (int i = 0; i <= data_len; i++)
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf[i + 1] = data->block[i];
+ } else {
+ msgs[CORE_I2C_SMBUS_MSG_RD].len = I2C_SMBUS_BLOCK_MAX + 1;
+ msgs[CORE_I2C_SMBUS_MSG_RD].buf = rx_buf;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ mchp_corei2c_xfer(&idev->adapter, msgs, num_msgs);
+ if (read_write == I2C_SMBUS_WRITE || size <= I2C_SMBUS_BYTE_DATA)
+ return 0;
+
+ switch (size) {
+ case I2C_SMBUS_WORD_DATA:
+ data->word = (rx_buf[0] | (rx_buf[1] << 8));
+ break;
+ case I2C_SMBUS_BLOCK_DATA:
+ if (rx_buf[0] > I2C_SMBUS_BLOCK_MAX)
+ rx_buf[0] = I2C_SMBUS_BLOCK_MAX;
+			/* As per the protocol, the first member of the block is its size. */
+ for (int i = 0; i <= rx_buf[0]; i++)
+ data->block[i] = rx_buf[i];
+ break;
+ }
+
+ return 0;
+}
+
static const struct i2c_algorithm mchp_corei2c_algo = {
.master_xfer = mchp_corei2c_xfer,
.functionality = mchp_corei2c_func,
+ .smbus_xfer = mchp_corei2c_smbus_xfer,
};
static int mchp_corei2c_probe(struct platform_device *pdev)
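With smbus_xfer wired up, SMBus word and block accesses from userspace are handled by mchp_corei2c_smbus_xfer() instead of being emulated through raw I2C messages. As a usage sketch (bus number, target address and register are illustrative), standard i2c-tools commands exercise the new path:

    i2cget -y 0 0x50 0x10 w            # SMBus read word data
    i2cset -y 0 0x50 0x10 0x1234 w     # SMBus write word data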
diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c
index 280dde53d7f3..8345f7e6385d 100644
--- a/drivers/i2c/busses/i2c-mlxbf.c
+++ b/drivers/i2c/busses/i2c-mlxbf.c
@@ -19,6 +19,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
/* Defines what functionality is present. */
#define MLXBF_I2C_FUNC_SMBUS_BLOCK \
@@ -197,6 +198,7 @@
#define MLXBF_I2C_MASK_8 GENMASK(7, 0)
#define MLXBF_I2C_MASK_16 GENMASK(15, 0)
+#define MLXBF_I2C_MASK_32 GENMASK(31, 0)
#define MLXBF_I2C_MST_ADDR_OFFSET 0x200
@@ -223,7 +225,7 @@
#define MLXBF_I2C_MASTER_ENABLE \
(MLXBF_I2C_MASTER_LOCK_BIT | MLXBF_I2C_MASTER_BUSY_BIT | \
- MLXBF_I2C_MASTER_START_BIT | MLXBF_I2C_MASTER_STOP_BIT)
+ MLXBF_I2C_MASTER_START_BIT)
#define MLXBF_I2C_MASTER_ENABLE_WRITE \
(MLXBF_I2C_MASTER_ENABLE | MLXBF_I2C_MASTER_CTL_WRITE_BIT)
@@ -337,6 +339,7 @@ enum {
MLXBF_I2C_F_SMBUS_BLOCK = BIT(5),
MLXBF_I2C_F_SMBUS_PEC = BIT(6),
MLXBF_I2C_F_SMBUS_PROCESS_CALL = BIT(7),
+ MLXBF_I2C_F_WRITE_WITHOUT_STOP = BIT(8),
};
/* Mellanox BlueField chip type. */
@@ -637,16 +640,19 @@ static void mlxbf_i2c_smbus_read_data(struct mlxbf_i2c_priv *priv,
}
static int mlxbf_i2c_smbus_enable(struct mlxbf_i2c_priv *priv, u8 slave,
- u8 len, u8 block_en, u8 pec_en, bool read)
+ u8 len, u8 block_en, u8 pec_en, bool read,
+ bool stop)
{
- u32 command;
+ u32 command = 0;
/* Set Master GW control word. */
+ if (stop)
+ command |= MLXBF_I2C_MASTER_STOP_BIT;
if (read) {
- command = MLXBF_I2C_MASTER_ENABLE_READ;
+ command |= MLXBF_I2C_MASTER_ENABLE_READ;
command |= rol32(len, MLXBF_I2C_MASTER_READ_SHIFT);
} else {
- command = MLXBF_I2C_MASTER_ENABLE_WRITE;
+ command |= MLXBF_I2C_MASTER_ENABLE_WRITE;
command |= rol32(len, MLXBF_I2C_MASTER_WRITE_SHIFT);
}
command |= rol32(slave, MLXBF_I2C_MASTER_SLV_ADDR_SHIFT);
@@ -681,8 +687,10 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
u8 op_idx, data_idx, data_len, write_len, read_len;
struct mlxbf_i2c_smbus_operation *operation;
u8 read_en, write_en, block_en, pec_en;
- u8 slave, flags, addr;
+ bool stop_after_write = true;
+ u8 slave, addr;
u8 *read_buf;
+ u32 flags;
u32 bits;
int ret;
@@ -754,7 +762,16 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
memcpy(data_desc + data_idx,
operation->buffer, operation->length);
data_idx += operation->length;
+
+ /*
+ * The stop condition can be skipped when writing on the bus
+ * to implement a repeated start condition on the next read
+ * as required for several SMBus and I2C operations.
+ */
+ if (flags & MLXBF_I2C_F_WRITE_WITHOUT_STOP)
+ stop_after_write = false;
}
+
/*
* We assume that read operations are performed only once per
* SMBus transaction. *TBD* protect this statement so it won't
@@ -780,7 +797,7 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
if (write_en) {
ret = mlxbf_i2c_smbus_enable(priv, slave, write_len, block_en,
- pec_en, 0);
+ pec_en, 0, stop_after_write);
if (ret)
goto out_unlock;
}
@@ -790,7 +807,7 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
mlxbf_i2c_smbus_write_data(priv, (const u8 *)&addr, 1,
MLXBF_I2C_MASTER_DATA_DESC_ADDR, true);
ret = mlxbf_i2c_smbus_enable(priv, slave, read_len, block_en,
- pec_en, 1);
+ pec_en, 1, true);
if (!ret) {
/* Get Master GW data descriptor. */
mlxbf_i2c_smbus_read_data(priv, data_desc, read_len + 1,
@@ -896,6 +913,9 @@ mlxbf_i2c_smbus_i2c_block_func(struct mlxbf_i2c_smbus_request *request,
request->operation[0].flags |= pec_check ? MLXBF_I2C_F_SMBUS_PEC : 0;
request->operation[0].buffer = command;
+ if (read)
+ request->operation[0].flags |= MLXBF_I2C_F_WRITE_WITHOUT_STOP;
+
/*
* As specified in the standard, the max number of bytes to read/write
* per block operation is 32 bytes. In Golan code, the controller can
@@ -1063,7 +1083,7 @@ static u32 mlxbf_i2c_get_ticks(struct mlxbf_i2c_priv *priv, u64 nanoseconds,
* Frequency
*/
frequency = priv->frequency;
- ticks = (nanoseconds * frequency) / MLXBF_I2C_FREQUENCY_1GHZ;
+ ticks = div_u64(nanoseconds * frequency, MLXBF_I2C_FREQUENCY_1GHZ);
/*
* The number of ticks is rounded down and if minimum is equal to 1
* then add one tick.
@@ -1130,7 +1150,8 @@ static void mlxbf_i2c_set_timings(struct mlxbf_i2c_priv *priv,
MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_16);
writel(timer, priv->timer->io + MLXBF_I2C_SMBUS_THIGH_MAX_TBUF);
- timer = timings->timeout;
+ timer = mlxbf_i2c_set_timer(priv, timings->timeout, false,
+ MLXBF_I2C_MASK_32, MLXBF_I2C_SHIFT_0);
writel(timer, priv->timer->io + MLXBF_I2C_SMBUS_SCL_LOW_TIMEOUT);
}
@@ -1140,11 +1161,7 @@ enum mlxbf_i2c_timings_config {
MLXBF_I2C_TIMING_CONFIG_1000KHZ,
};
-/*
- * Note that the mlxbf_i2c_timings->timeout value is not related to the
- * bus frequency, it is impacted by the time it takes the driver to
- * complete data transmission before transaction abort.
- */
+/* Timing values are in nanoseconds */
static const struct mlxbf_i2c_timings mlxbf_i2c_timings[] = {
[MLXBF_I2C_TIMING_CONFIG_100KHZ] = {
.scl_high = 4810,
@@ -1159,8 +1176,8 @@ static const struct mlxbf_i2c_timings mlxbf_i2c_timings[] = {
.scl_fall = 50,
.hold_data = 300,
.buf = 20000,
- .thigh_max = 5000,
- .timeout = 106500
+ .thigh_max = 50000,
+ .timeout = 35000000
},
[MLXBF_I2C_TIMING_CONFIG_400KHZ] = {
.scl_high = 1011,
@@ -1175,24 +1192,24 @@ static const struct mlxbf_i2c_timings mlxbf_i2c_timings[] = {
.scl_fall = 50,
.hold_data = 300,
.buf = 20000,
- .thigh_max = 5000,
- .timeout = 106500
+ .thigh_max = 50000,
+ .timeout = 35000000
},
[MLXBF_I2C_TIMING_CONFIG_1000KHZ] = {
- .scl_high = 600,
- .scl_low = 1300,
+ .scl_high = 383,
+ .scl_low = 460,
.hold_start = 600,
- .setup_start = 600,
- .setup_stop = 600,
- .setup_data = 100,
+ .setup_start = 260,
+ .setup_stop = 260,
+ .setup_data = 50,
.sda_rise = 50,
.sda_fall = 50,
.scl_rise = 50,
.scl_fall = 50,
.hold_data = 300,
- .buf = 20000,
- .thigh_max = 5000,
- .timeout = 106500
+ .buf = 500,
+ .thigh_max = 50000,
+ .timeout = 35000000
}
};
@@ -1443,9 +1460,8 @@ static u64 mlxbf_i2c_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_
* and PadFrequency, respectively.
*/
core_frequency = MLXBF_I2C_PLL_IN_FREQ * (++core_f);
- core_frequency /= (++core_r) * (++core_od);
- return core_frequency;
+ return div_u64(core_frequency, (++core_r) * (++core_od));
}
static u64 mlxbf_i2c_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res)
@@ -1474,9 +1490,8 @@ static u64 mlxbf_i2c_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_r
* and PadFrequency, respectively.
*/
corepll_frequency = (MLXBF_I2C_PLL_IN_FREQ * core_f) / MLNXBF_I2C_COREPLL_CONST;
- corepll_frequency /= (++core_r) * (++core_od);
- return corepll_frequency;
+ return div_u64(corepll_frequency, (++core_r) * (++core_od));
}
static int mlxbf_i2c_calculate_corepll_freq(struct platform_device *pdev,
@@ -2038,21 +2053,21 @@ static s32 mlxbf_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr,
read ? &data->byte : &command, read,
pec);
dev_dbg(&adap->dev, "smbus %s byte, slave 0x%02x.\n",
- read ? "read" : "write", addr);
+ str_read_write(read), addr);
break;
case I2C_SMBUS_BYTE_DATA:
mlxbf_i2c_smbus_data_byte_func(&request, &command, &data->byte,
read, pec);
dev_dbg(&adap->dev, "smbus %s byte data at 0x%02x, slave 0x%02x.\n",
- read ? "read" : "write", command, addr);
+ str_read_write(read), command, addr);
break;
case I2C_SMBUS_WORD_DATA:
mlxbf_i2c_smbus_data_word_func(&request, &command,
(u8 *)&data->word, read, pec);
dev_dbg(&adap->dev, "smbus %s word data at 0x%02x, slave 0x%02x.\n",
- read ? "read" : "write", command, addr);
+ str_read_write(read), command, addr);
break;
case I2C_SMBUS_I2C_BLOCK_DATA:
@@ -2060,7 +2075,7 @@ static s32 mlxbf_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr,
mlxbf_i2c_smbus_i2c_block_func(&request, &command, data->block,
&byte_cnt, read, pec);
dev_dbg(&adap->dev, "i2c %s block data, %d bytes at 0x%02x, slave 0x%02x.\n",
- read ? "read" : "write", byte_cnt, command, addr);
+ str_read_write(read), byte_cnt, command, addr);
break;
case I2C_SMBUS_BLOCK_DATA:
@@ -2068,7 +2083,7 @@ static s32 mlxbf_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr,
mlxbf_i2c_smbus_block_func(&request, &command, data->block,
&byte_cnt, read, pec);
dev_dbg(&adap->dev, "smbus %s block data, %d bytes at 0x%02x, slave 0x%02x.\n",
- read ? "read" : "write", byte_cnt, command, addr);
+ str_read_write(read), byte_cnt, command, addr);
break;
case I2C_FUNC_SMBUS_PROC_CALL:
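
The MLXBF_I2C_F_WRITE_WITHOUT_STOP flag above lets the write half of an I2C block read finish without a STOP, so the following read begins with a repeated START. The sketch below only illustrates the general idea of assembling a master control word with an optional STOP bit; the bit positions are made up for the example and do not reflect the BlueField register layout.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical bit layout, for illustration only. */
#define CMD_LOCK	(1u << 31)
#define CMD_BUSY	(1u << 30)
#define CMD_START	(1u << 29)
#define CMD_STOP	(1u << 28)
#define CMD_READ	(1u << 27)

static uint32_t build_cmd(uint8_t addr, uint8_t len, bool read, bool stop)
{
	uint32_t cmd = CMD_LOCK | CMD_BUSY | CMD_START;

	if (stop)
		cmd |= CMD_STOP;	/* omit for a repeated START on the next transfer */
	if (read)
		cmd |= CMD_READ;
	cmd |= (uint32_t)len << 8;	/* transfer length */
	cmd |= addr;			/* 7-bit target address */
	return cmd;
}

int main(void)
{
	printf("write w/o stop: %08x\n", build_cmd(0x50, 1, false, false));
	printf("read w/ stop:   %08x\n", build_cmd(0x50, 4, true, true));
	return 0;
}
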
diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c
index de713b5747fe..892e2d2988a7 100644
--- a/drivers/i2c/busses/i2c-npcm7xx.c
+++ b/drivers/i2c/busses/i2c-npcm7xx.c
@@ -1115,14 +1115,10 @@ static void npcm_i2c_master_abort(struct npcm_i2c *bus)
#if IS_ENABLED(CONFIG_I2C_SLAVE)
static u8 npcm_i2c_get_slave_addr(struct npcm_i2c *bus, enum i2c_addr addr_type)
{
- u8 slave_add;
-
if (addr_type > I2C_SLAVE_ADDR2 && addr_type <= I2C_SLAVE_ADDR10)
dev_err(bus->dev, "get slave: try to use more than 2 SA not supported\n");
- slave_add = ioread8(bus->reg + npcm_i2caddr[(int)addr_type]);
-
- return slave_add;
+ return ioread8(bus->reg + npcm_i2caddr[addr_type]);
}
static int npcm_i2c_remove_slave_addr(struct npcm_i2c *bus, u8 slave_add)
@@ -2178,10 +2174,14 @@ static int npcm_i2c_init_module(struct npcm_i2c *bus, enum i2c_mode mode,
/* Check HW is OK: SDA and SCL should be high at this point. */
if ((npcm_i2c_get_SDA(&bus->adap) == 0) || (npcm_i2c_get_SCL(&bus->adap) == 0)) {
- dev_err(bus->dev, "I2C%d init fail: lines are low\n", bus->num);
- dev_err(bus->dev, "SDA=%d SCL=%d\n", npcm_i2c_get_SDA(&bus->adap),
- npcm_i2c_get_SCL(&bus->adap));
- return -ENXIO;
+ dev_warn(bus->dev, " I2C%d SDA=%d SCL=%d, attempting to recover\n", bus->num,
+ npcm_i2c_get_SDA(&bus->adap), npcm_i2c_get_SCL(&bus->adap));
+ if (npcm_i2c_recovery_tgclk(&bus->adap)) {
+ dev_err(bus->dev, "I2C%d init fail: SDA=%d SCL=%d\n",
+ bus->num, npcm_i2c_get_SDA(&bus->adap),
+ npcm_i2c_get_SCL(&bus->adap));
+ return -ENXIO;
+ }
}
npcm_i2c_int_enable(bus, true);
diff --git a/drivers/i2c/busses/i2c-octeon-core.c b/drivers/i2c/busses/i2c-octeon-core.c
index baf6b27f3752..93a49e4637ec 100644
--- a/drivers/i2c/busses/i2c-octeon-core.c
+++ b/drivers/i2c/busses/i2c-octeon-core.c
@@ -135,6 +135,32 @@ static void octeon_i2c_hlc_disable(struct octeon_i2c *i2c)
octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB);
}
+static void octeon_i2c_block_enable(struct octeon_i2c *i2c)
+{
+ u64 mode;
+
+ if (i2c->block_enabled || !OCTEON_REG_BLOCK_CTL(i2c))
+ return;
+
+ i2c->block_enabled = true;
+ mode = __raw_readq(i2c->twsi_base + OCTEON_REG_MODE(i2c));
+ mode |= TWSX_MODE_BLOCK_MODE;
+ octeon_i2c_writeq_flush(mode, i2c->twsi_base + OCTEON_REG_MODE(i2c));
+}
+
+static void octeon_i2c_block_disable(struct octeon_i2c *i2c)
+{
+ u64 mode;
+
+ if (!i2c->block_enabled || !OCTEON_REG_BLOCK_CTL(i2c))
+ return;
+
+ i2c->block_enabled = false;
+ mode = __raw_readq(i2c->twsi_base + OCTEON_REG_MODE(i2c));
+ mode &= ~TWSX_MODE_BLOCK_MODE;
+ octeon_i2c_writeq_flush(mode, i2c->twsi_base + OCTEON_REG_MODE(i2c));
+}
+
/**
* octeon_i2c_hlc_wait - wait for an HLC operation to complete
* @i2c: The struct octeon_i2c
@@ -281,6 +307,7 @@ static int octeon_i2c_start(struct octeon_i2c *i2c)
u8 stat;
octeon_i2c_hlc_disable(i2c);
+ octeon_i2c_block_disable(i2c);
octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB | TWSI_CTL_STA);
ret = octeon_i2c_wait(i2c);
@@ -605,6 +632,125 @@ err:
}
/**
+ * octeon_i2c_hlc_block_comp_read - high-level-controller composite block read
+ * @i2c: The struct octeon_i2c
+ * @msgs: msg[0] contains address, place read data into msg[1]
+ *
+ * i2c core command is constructed and written into the SW_TWSI register.
+ * The execution of the command will result in requested data being
+ * placed into a FIFO buffer, ready to be read.
+ * Used in the case where the i2c xfer is for greater than 8 bytes of read data.
+ *
+ * Returns: 0 on success, otherwise a negative errno.
+ */
+static int octeon_i2c_hlc_block_comp_read(struct octeon_i2c *i2c, struct i2c_msg *msgs)
+{
+ int ret;
+ u16 len, i;
+ u64 cmd;
+
+ octeon_i2c_hlc_enable(i2c);
+ octeon_i2c_block_enable(i2c);
+
+ /* Write (size - 1) into block control register */
+ len = msgs[1].len - 1;
+ octeon_i2c_writeq_flush((u64)len, i2c->twsi_base + OCTEON_REG_BLOCK_CTL(i2c));
+
+ /* Prepare core command */
+ cmd = SW_TWSI_V | SW_TWSI_R | SW_TWSI_SOVR | SW_TWSI_OP_7_IA;
+ cmd |= (u64)(msgs[0].addr & 0x7full) << SW_TWSI_ADDR_SHIFT;
+
+ /* Send core command */
+ ret = octeon_i2c_hlc_read_cmd(i2c, msgs[0], cmd);
+ if (ret)
+ goto err;
+
+ cmd = __raw_readq(i2c->twsi_base + OCTEON_REG_SW_TWSI(i2c));
+ if ((cmd & SW_TWSI_R) == 0) {
+ octeon_i2c_block_disable(i2c);
+ return octeon_i2c_check_status(i2c, false);
+ }
+
+ /* read data in FIFO */
+ octeon_i2c_writeq_flush(TWSX_BLOCK_STS_RESET_PTR,
+ i2c->twsi_base + OCTEON_REG_BLOCK_STS(i2c));
+ for (i = 0; i <= len; i += 8) {
+ /* Byte-swap FIFO data and copy into msg buffer */
+ __be64 rd = cpu_to_be64(__raw_readq(i2c->twsi_base + OCTEON_REG_BLOCK_FIFO(i2c)));
+
+ memcpy(&msgs[1].buf[i], &rd, min(8, msgs[1].len - i));
+ }
+
+err:
+ octeon_i2c_block_disable(i2c);
+ return ret;
+}
+
+/**
+ * octeon_i2c_hlc_block_comp_write - high-level-controller composite block write
+ * @i2c: The struct octeon_i2c
+ * @msgs: msg[0] contains address, msg[1] contains data to be written
+ *
+ * i2c core command is constructed and write data is written into the FIFO buffer.
+ * The execution of the command will result in HW write, using the data in FIFO.
+ * Used in the case where the i2c xfer is for greater than 8 bytes of write data.
+ *
+ * Returns: 0 on success, otherwise a negative errno.
+ */
+static int octeon_i2c_hlc_block_comp_write(struct octeon_i2c *i2c, struct i2c_msg *msgs)
+{
+ bool set_ext;
+ int ret;
+ u16 len, i;
+ u64 cmd, ext = 0;
+
+ octeon_i2c_hlc_enable(i2c);
+ octeon_i2c_block_enable(i2c);
+
+ /* Write (size - 1) into block control register */
+ len = msgs[1].len - 1;
+ octeon_i2c_writeq_flush((u64)len, i2c->twsi_base + OCTEON_REG_BLOCK_CTL(i2c));
+
+ /* Prepare core command */
+ cmd = SW_TWSI_V | SW_TWSI_SOVR | SW_TWSI_OP_7_IA;
+ cmd |= (u64)(msgs[0].addr & 0x7full) << SW_TWSI_ADDR_SHIFT;
+
+ /* Set parameters for extended message (if required) */
+ set_ext = octeon_i2c_hlc_ext(i2c, msgs[0], &cmd, &ext);
+
+ /* Write msg into FIFO buffer */
+ octeon_i2c_writeq_flush(TWSX_BLOCK_STS_RESET_PTR,
+ i2c->twsi_base + OCTEON_REG_BLOCK_STS(i2c));
+ for (i = 0; i <= len; i += 8) {
+ __be64 buf = 0;
+
+ /* Copy 8 bytes or remaining bytes from message buffer */
+ memcpy(&buf, &msgs[1].buf[i], min(8, msgs[1].len - i));
+
+ /* Byte-swap message data and write into FIFO */
+ buf = cpu_to_be64(buf);
+ octeon_i2c_writeq_flush((u64)buf, i2c->twsi_base + OCTEON_REG_BLOCK_FIFO(i2c));
+ }
+ if (set_ext)
+ octeon_i2c_writeq_flush(ext, i2c->twsi_base + OCTEON_REG_SW_TWSI_EXT(i2c));
+
+ /* Send command to core (send data in FIFO) */
+ ret = octeon_i2c_hlc_cmd_send(i2c, cmd);
+ if (ret)
+ goto err;
+
+ cmd = __raw_readq(i2c->twsi_base + OCTEON_REG_SW_TWSI(i2c));
+ if ((cmd & SW_TWSI_R) == 0) {
+ octeon_i2c_block_disable(i2c);
+ return octeon_i2c_check_status(i2c, false);
+ }
+
+err:
+ octeon_i2c_block_disable(i2c);
+ return ret;
+}
+
+/**
* octeon_i2c_xfer - The driver's xfer function
* @adap: Pointer to the i2c_adapter structure
* @msgs: Pointer to the messages to be processed
@@ -630,13 +776,21 @@ int octeon_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
if ((msgs[0].flags & I2C_M_RD) == 0 &&
(msgs[1].flags & I2C_M_RECV_LEN) == 0 &&
msgs[0].len > 0 && msgs[0].len <= 2 &&
- msgs[1].len > 0 && msgs[1].len <= 8 &&
+ msgs[1].len > 0 &&
msgs[0].addr == msgs[1].addr) {
- if (msgs[1].flags & I2C_M_RD)
- ret = octeon_i2c_hlc_comp_read(i2c, msgs);
- else
- ret = octeon_i2c_hlc_comp_write(i2c, msgs);
- goto out;
+ if (msgs[1].len <= 8) {
+ if (msgs[1].flags & I2C_M_RD)
+ ret = octeon_i2c_hlc_comp_read(i2c, msgs);
+ else
+ ret = octeon_i2c_hlc_comp_write(i2c, msgs);
+ goto out;
+ } else if (msgs[1].len <= 1024 && OCTEON_REG_BLOCK_CTL(i2c)) {
+ if (msgs[1].flags & I2C_M_RD)
+ ret = octeon_i2c_hlc_block_comp_read(i2c, msgs);
+ else
+ ret = octeon_i2c_hlc_block_comp_write(i2c, msgs);
+ goto out;
+ }
}
}
}
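
The new block-mode helpers stream data through a 64-bit FIFO eight bytes at a time, byte-swapping each word so the first buffer byte lands in the most significant lane and bounding the final memcpy() so a partial word never overruns the message buffer. A self-contained sketch of that chunking, with htobe64() standing in for cpu_to_be64():

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Split a message into big-endian 64-bit FIFO words, as the write loop does. */
static void buf_to_fifo(const uint8_t *buf, size_t len)
{
	for (size_t i = 0; i < len; i += 8) {
		size_t chunk = (len - i < 8) ? len - i : 8;
		uint64_t word = 0;

		memcpy(&word, buf + i, chunk);	/* partial last word is zero padded */
		word = htobe64(word);		/* first byte ends up in the MSB lane */
		printf("fifo word: %016llx\n", (unsigned long long)word);
	}
}

int main(void)
{
	const char msg[] = "hello fifo";

	buf_to_fifo((const uint8_t *)msg, sizeof(msg) - 1);
	return 0;
}
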
diff --git a/drivers/i2c/busses/i2c-octeon-core.h b/drivers/i2c/busses/i2c-octeon-core.h
index b265e21189a1..32a44f2d6274 100644
--- a/drivers/i2c/busses/i2c-octeon-core.h
+++ b/drivers/i2c/busses/i2c-octeon-core.h
@@ -96,18 +96,28 @@ struct octeon_i2c_reg_offset {
unsigned int twsi_int;
unsigned int sw_twsi_ext;
unsigned int mode;
+ unsigned int block_ctl;
+ unsigned int block_sts;
+ unsigned int block_fifo;
};
#define OCTEON_REG_SW_TWSI(x) ((x)->roff.sw_twsi)
#define OCTEON_REG_TWSI_INT(x) ((x)->roff.twsi_int)
#define OCTEON_REG_SW_TWSI_EXT(x) ((x)->roff.sw_twsi_ext)
#define OCTEON_REG_MODE(x) ((x)->roff.mode)
+#define OCTEON_REG_BLOCK_CTL(x) ((x)->roff.block_ctl)
+#define OCTEON_REG_BLOCK_STS(x) ((x)->roff.block_sts)
+#define OCTEON_REG_BLOCK_FIFO(x) ((x)->roff.block_fifo)
-/* Set REFCLK_SRC and HS_MODE in TWSX_MODE register */
+/* TWSX_MODE register */
#define TWSX_MODE_REFCLK_SRC BIT(4)
+#define TWSX_MODE_BLOCK_MODE BIT(2)
#define TWSX_MODE_HS_MODE BIT(0)
#define TWSX_MODE_HS_MASK (TWSX_MODE_REFCLK_SRC | TWSX_MODE_HS_MODE)
+/* TWSX_BLOCK_STS register */
+#define TWSX_BLOCK_STS_RESET_PTR BIT(0)
+
/* Set BUS_MON_RST to reset bus monitor */
#define BUS_MON_RST_MASK BIT(3)
@@ -123,6 +133,7 @@ struct octeon_i2c {
void __iomem *twsi_base;
struct device *dev;
bool hlc_enabled;
+ bool block_enabled;
bool broken_irq_mode;
bool broken_irq_check;
void (*int_enable)(struct octeon_i2c *);
diff --git a/drivers/i2c/busses/i2c-pasemi-core.c b/drivers/i2c/busses/i2c-pasemi-core.c
index bd128ab2e2eb..f4eca44ed183 100644
--- a/drivers/i2c/busses/i2c-pasemi-core.c
+++ b/drivers/i2c/busses/i2c-pasemi-core.c
@@ -5,22 +5,24 @@
* SMBus host driver for PA Semi PWRficient
*/
-#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/stddef.h>
#include <linux/sched.h>
-#include <linux/i2c.h>
-#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/io.h>
+#include <linux/stddef.h>
#include "i2c-pasemi-core.h"
/* Register offsets */
#define REG_MTXFIFO 0x00
#define REG_MRXFIFO 0x04
+#define REG_XFSTA 0x0c
#define REG_SMSTA 0x14
#define REG_IMASK 0x18
#define REG_CTL 0x1c
@@ -52,6 +54,12 @@
#define CTL_UJM BIT(8)
#define CTL_CLK_M GENMASK(7, 0)
+/*
+ * The hardware (supposedly) has a 25ms timeout for clock stretching, thus
+ * use 100ms here which should be plenty.
+ */
+#define PASEMI_TRANSFER_TIMEOUT_MS 100
+
static inline void reg_write(struct pasemi_smbus *smbus, int reg, int val)
{
dev_dbg(smbus->dev, "smbus write reg %x val %08x\n", reg, val);
@@ -71,7 +79,7 @@ static inline int reg_read(struct pasemi_smbus *smbus, int reg)
static void pasemi_reset(struct pasemi_smbus *smbus)
{
- u32 val = (CTL_MTR | CTL_MRR | (smbus->clk_div & CTL_CLK_M));
+ u32 val = (CTL_MTR | CTL_MRR | CTL_UJM | (smbus->clk_div & CTL_CLK_M));
if (smbus->hw_rev >= 6)
val |= CTL_EN;
@@ -80,43 +88,102 @@ static void pasemi_reset(struct pasemi_smbus *smbus)
reinit_completion(&smbus->irq_completion);
}
-static void pasemi_smb_clear(struct pasemi_smbus *smbus)
+static int pasemi_smb_clear(struct pasemi_smbus *smbus)
{
unsigned int status;
+ int ret;
+
+ /* First wait for the bus to go idle */
+ ret = readx_poll_timeout(ioread32, smbus->ioaddr + REG_SMSTA,
+ status, !(status & (SMSTA_XIP | SMSTA_JAM)),
+ USEC_PER_MSEC,
+ USEC_PER_MSEC * PASEMI_TRANSFER_TIMEOUT_MS);
+
+ if (ret < 0) {
+ dev_err(smbus->dev, "Bus is still stuck (status 0x%08x xfstatus 0x%08x)\n",
+ status, reg_read(smbus, REG_XFSTA));
+ return -EIO;
+ }
+
+ /* If any badness happened or there is data in the FIFOs, reset the FIFOs */
+ if ((status & (SMSTA_MRNE | SMSTA_JMD | SMSTA_MTO | SMSTA_TOM | SMSTA_MTN | SMSTA_MTA)) ||
+ !(status & SMSTA_MTE)) {
+ dev_warn(smbus->dev, "Issuing reset due to status 0x%08x (xfstatus 0x%08x)\n",
+ status, reg_read(smbus, REG_XFSTA));
+ pasemi_reset(smbus);
+ }
- status = reg_read(smbus, REG_SMSTA);
+ /* Clear the flags */
reg_write(smbus, REG_SMSTA, status);
+
+ return 0;
}
static int pasemi_smb_waitready(struct pasemi_smbus *smbus)
{
- int timeout = 100;
unsigned int status;
if (smbus->use_irq) {
reinit_completion(&smbus->irq_completion);
reg_write(smbus, REG_IMASK, SMSTA_XEN | SMSTA_MTN);
- wait_for_completion_timeout(&smbus->irq_completion, msecs_to_jiffies(100));
+ int ret = wait_for_completion_timeout(
+ &smbus->irq_completion,
+ msecs_to_jiffies(PASEMI_TRANSFER_TIMEOUT_MS));
reg_write(smbus, REG_IMASK, 0);
status = reg_read(smbus, REG_SMSTA);
+
+ if (ret < 0) {
+ dev_err(smbus->dev,
+ "Completion wait failed with %d, status 0x%08x\n",
+ ret, status);
+ return ret;
+ } else if (ret == 0) {
+ dev_err(smbus->dev, "Timeout, status 0x%08x\n", status);
+ return -ETIME;
+ }
} else {
- status = reg_read(smbus, REG_SMSTA);
- while (!(status & SMSTA_XEN) && timeout--) {
- msleep(1);
- status = reg_read(smbus, REG_SMSTA);
+ int ret = readx_poll_timeout(
+ ioread32, smbus->ioaddr + REG_SMSTA,
+ status, status & SMSTA_XEN,
+ USEC_PER_MSEC,
+ USEC_PER_MSEC * PASEMI_TRANSFER_TIMEOUT_MS);
+
+ if (ret < 0) {
+ dev_err(smbus->dev, "Timeout, status 0x%08x\n", status);
+ return -ETIME;
}
}
- /* Got NACK? */
- if (status & SMSTA_MTN)
- return -ENXIO;
+ /* Controller timeout? */
+ if (status & SMSTA_TOM) {
+ dev_err(smbus->dev, "Controller timeout, status 0x%08x\n", status);
+ return -EIO;
+ }
- if (timeout < 0) {
- dev_warn(smbus->dev, "Timeout, status 0x%08x\n", status);
- reg_write(smbus, REG_SMSTA, status);
+ /* Peripheral timeout? */
+ if (status & SMSTA_MTO) {
+ dev_err(smbus->dev, "Peripheral timeout, status 0x%08x\n", status);
return -ETIME;
}
+ /* Still stuck in a transaction? */
+ if (status & SMSTA_XIP) {
+ dev_err(smbus->dev, "Bus stuck, status 0x%08x\n", status);
+ return -EIO;
+ }
+
+ /* Arbitration loss? */
+ if (status & SMSTA_MTA) {
+ dev_err(smbus->dev, "Arbitration loss, status 0x%08x\n", status);
+ return -EBUSY;
+ }
+
+ /* Got NACK? */
+ if (status & SMSTA_MTN) {
+ dev_err(smbus->dev, "NACK, status 0x%08x\n", status);
+ return -ENXIO;
+ }
+
/* Clear XEN */
reg_write(smbus, REG_SMSTA, SMSTA_XEN);
@@ -177,9 +244,9 @@ static int pasemi_i2c_xfer(struct i2c_adapter *adapter,
struct pasemi_smbus *smbus = adapter->algo_data;
int ret, i;
- pasemi_smb_clear(smbus);
-
- ret = 0;
+ ret = pasemi_smb_clear(smbus);
+ if (ret)
+ return ret;
for (i = 0; i < num && !ret; i++)
ret = pasemi_i2c_xfer_msg(adapter, &msgs[i], (i == (num - 1)));
@@ -200,7 +267,9 @@ static int pasemi_smb_xfer(struct i2c_adapter *adapter,
addr <<= 1;
read_flag = read_write == I2C_SMBUS_READ;
- pasemi_smb_clear(smbus);
+ err = pasemi_smb_clear(smbus);
+ if (err)
+ return err;
switch (size) {
case I2C_SMBUS_QUICK:
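
pasemi_smb_clear() and the non-IRQ wait path now rely on readx_poll_timeout() rather than an open-coded msleep() loop: re-read the status register until the condition holds or a bounded total time elapses. A rough user-space analogue of that pattern, with a hypothetical read_status() in place of the MMIO read:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint32_t fake_status;

/* Hypothetical status read; counts up so the polled bit is set eventually. */
static uint32_t read_status(void)
{
	return ++fake_status;
}

/* Poll until (status & mask) is set or timeout_ms has passed. */
static int poll_timeout(uint32_t mask, long timeout_ms, uint32_t *out)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		*out = read_status();
		if (*out & mask)
			return 0;		/* condition met */
		clock_gettime(CLOCK_MONOTONIC, &now);
		long elapsed = (now.tv_sec - start.tv_sec) * 1000 +
			       (now.tv_nsec - start.tv_nsec) / 1000000;
		if (elapsed > timeout_ms)
			return -1;		/* like -ETIME */
		nanosleep(&(struct timespec){ .tv_nsec = 1000000 }, NULL);
	}
}

int main(void)
{
	uint32_t status;
	int ret = poll_timeout(1u << 6, 100, &status);

	printf("ret=%d status=0x%08x\n", ret, status);
	return 0;
}
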
diff --git a/drivers/i2c/busses/i2c-pasemi-pci.c b/drivers/i2c/busses/i2c-pasemi-pci.c
index 77f90c7436ed..b9ccb54ec77e 100644
--- a/drivers/i2c/busses/i2c-pasemi-pci.c
+++ b/drivers/i2c/busses/i2c-pasemi-pci.c
@@ -5,15 +5,15 @@
* SMBus host driver for PA Semi PWRficient
*/
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/stddef.h>
#include <linux/sched.h>
-#include <linux/i2c.h>
-#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/io.h>
+#include <linux/stddef.h>
#include "i2c-pasemi-core.h"
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 59ecaa990bce..9d3a4dc2bd60 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -971,7 +971,7 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
* This would allow the ee1004 to be probed incorrectly.
*/
if (port == 0)
- i2c_register_spd(adap);
+ i2c_register_spd_write_enable(adap);
*padap = adap;
return 0;
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 9a867c817db0..f99a2cc721a8 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -349,7 +349,7 @@ static void i2c_powermac_register_devices(struct i2c_adapter *adap,
/* Fill out the rest of the info structure */
info.addr = addr;
info.irq = irq_of_parse_and_map(node, 0);
- info.of_node = of_node_get(node);
+ info.fwnode = of_fwnode_handle(of_node_get(node));
newdev = i2c_new_client_device(adap, &info);
if (IS_ERR(newdev)) {
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 515a784c951c..ccea575fb783 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -71,7 +71,6 @@ enum geni_i2c_err_code {
<< 5)
#define I2C_AUTO_SUSPEND_DELAY 250
-#define KHZ(freq) (1000 * freq)
#define PACKING_BYTES_PW 4
#define ABORT_TIMEOUT HZ
@@ -148,18 +147,18 @@ struct geni_i2c_clk_fld {
* source_clock = 19.2 MHz
*/
static const struct geni_i2c_clk_fld geni_i2c_clk_map_19p2mhz[] = {
- {KHZ(100), 7, 10, 12, 26},
- {KHZ(400), 2, 5, 11, 22},
- {KHZ(1000), 1, 2, 8, 18},
- {},
+ { I2C_MAX_STANDARD_MODE_FREQ, 7, 10, 12, 26 },
+ { I2C_MAX_FAST_MODE_FREQ, 2, 5, 11, 22 },
+ { I2C_MAX_FAST_MODE_PLUS_FREQ, 1, 2, 8, 18 },
+ {}
};
/* source_clock = 32 MHz */
static const struct geni_i2c_clk_fld geni_i2c_clk_map_32mhz[] = {
- {KHZ(100), 8, 14, 18, 40},
- {KHZ(400), 4, 3, 11, 20},
- {KHZ(1000), 2, 3, 6, 15},
- {},
+ { I2C_MAX_STANDARD_MODE_FREQ, 8, 14, 18, 40 },
+ { I2C_MAX_FAST_MODE_FREQ, 4, 3, 11, 20 },
+ { I2C_MAX_FAST_MODE_PLUS_FREQ, 2, 3, 6, 15 },
+ {}
};
static int geni_i2c_clk_map_idx(struct geni_i2c_dev *gi2c)
@@ -812,7 +811,7 @@ static int geni_i2c_probe(struct platform_device *pdev)
&gi2c->clk_freq_out);
if (ret) {
dev_info(dev, "Bus frequency not specified, default to 100kHz.\n");
- gi2c->clk_freq_out = KHZ(100);
+ gi2c->clk_freq_out = I2C_MAX_STANDARD_MODE_FREQ;
}
if (has_acpi_companion(dev))
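
Dropping the driver-local KHZ() macro in favour of the generic I2C mode constants keeps the clock-field tables keyed by the same frequencies the core reports in clk_freq_out (100 kHz, 400 kHz and 1 MHz). Selecting the right row is a simple scan, roughly as sketched below; the struct field names here are illustrative, not the driver's.

#include <stdio.h>

struct clk_fld {
	unsigned int clk_freq_out;	/* bus frequency in Hz */
	int clk_div, t_high, t_low, t_cycle;
};

static const struct clk_fld clk_map[] = {
	{  100000, 7, 10, 12, 26 },	/* standard mode */
	{  400000, 2,  5, 11, 22 },	/* fast mode */
	{ 1000000, 1,  2,  8, 18 },	/* fast mode plus */
	{ }
};

/* Pick the table entry matching the requested bus frequency, if any. */
static const struct clk_fld *clk_map_lookup(unsigned int freq)
{
	for (const struct clk_fld *itr = clk_map; itr->clk_freq_out; itr++)
		if (itr->clk_freq_out == freq)
			return itr;
	return NULL;
}

int main(void)
{
	const struct clk_fld *fld = clk_map_lookup(400000);

	printf("400 kHz -> div %d\n", fld ? fld->clk_div : -1);
	return 0;
}
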
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index d7dddd6c296a..23375f7fe3ad 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -52,6 +52,8 @@
#define ICCR1_ICE BIT(7)
#define ICCR1_IICRST BIT(6)
#define ICCR1_SOWP BIT(4)
+#define ICCR1_SCLO BIT(3)
+#define ICCR1_SDAO BIT(2)
#define ICCR1_SCLI BIT(1)
#define ICCR1_SDAI BIT(0)
@@ -151,11 +153,11 @@ static int riic_bus_barrier(struct riic_dev *riic)
ret = readb_poll_timeout(riic->base + riic->info->regs[RIIC_ICCR2], val,
!(val & ICCR2_BBSY), 10, riic->adapter.timeout);
if (ret)
- return ret;
+ return i2c_recover_bus(&riic->adapter);
if ((riic_readb(riic, RIIC_ICCR1) & (ICCR1_SDAI | ICCR1_SCLI)) !=
(ICCR1_SDAI | ICCR1_SCLI))
- return -EBUSY;
+ return i2c_recover_bus(&riic->adapter);
return 0;
}
@@ -439,6 +441,52 @@ static int riic_init_hw(struct riic_dev *riic)
return 0;
}
+static int riic_get_scl(struct i2c_adapter *adap)
+{
+ struct riic_dev *riic = i2c_get_adapdata(adap);
+
+ return !!(riic_readb(riic, RIIC_ICCR1) & ICCR1_SCLI);
+}
+
+static int riic_get_sda(struct i2c_adapter *adap)
+{
+ struct riic_dev *riic = i2c_get_adapdata(adap);
+
+ return !!(riic_readb(riic, RIIC_ICCR1) & ICCR1_SDAI);
+}
+
+static void riic_set_scl(struct i2c_adapter *adap, int val)
+{
+ struct riic_dev *riic = i2c_get_adapdata(adap);
+
+ if (val)
+ riic_clear_set_bit(riic, ICCR1_SOWP, ICCR1_SCLO, RIIC_ICCR1);
+ else
+ riic_clear_set_bit(riic, ICCR1_SOWP | ICCR1_SCLO, 0, RIIC_ICCR1);
+
+ riic_clear_set_bit(riic, 0, ICCR1_SOWP, RIIC_ICCR1);
+}
+
+static void riic_set_sda(struct i2c_adapter *adap, int val)
+{
+ struct riic_dev *riic = i2c_get_adapdata(adap);
+
+ if (val)
+ riic_clear_set_bit(riic, ICCR1_SOWP, ICCR1_SDAO, RIIC_ICCR1);
+ else
+ riic_clear_set_bit(riic, ICCR1_SOWP | ICCR1_SDAO, 0, RIIC_ICCR1);
+
+ riic_clear_set_bit(riic, 0, ICCR1_SOWP, RIIC_ICCR1);
+}
+
+static struct i2c_bus_recovery_info riic_bri = {
+ .recover_bus = i2c_generic_scl_recovery,
+ .get_scl = riic_get_scl,
+ .set_scl = riic_set_scl,
+ .get_sda = riic_get_sda,
+ .set_sda = riic_set_sda,
+};
+
static const struct riic_irq_desc riic_irqs[] = {
{ .res_num = 0, .isr = riic_tend_isr, .name = "riic-tend" },
{ .res_num = 1, .isr = riic_rdrf_isr, .name = "riic-rdrf" },
@@ -495,6 +543,7 @@ static int riic_i2c_probe(struct platform_device *pdev)
adap->algo = &riic_algo;
adap->dev.parent = dev;
adap->dev.of_node = dev->of_node;
+ adap->bus_recovery_info = &riic_bri;
init_completion(&riic->msg_done);
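
The new riic_bri hooks plug into i2c_generic_scl_recovery(), whose core idea is: while SDA is stuck low, pulse SCL up to nine times so a target caught mid-byte can finish clocking out and release the bus. A rough callback-style sketch of that algorithm, not the kernel implementation:

#include <stdbool.h>
#include <stdio.h>

struct recovery_ops {
	bool (*get_sda)(void);
	void (*set_scl)(bool level);
};

/* Toggle SCL up to 9 times until the target releases SDA. */
static int scl_recover(const struct recovery_ops *ops)
{
	for (int i = 0; i < 9; i++) {
		if (ops->get_sda())
			return 0;		/* bus released */
		ops->set_scl(false);		/* drive SCL low ... */
		/* delay ~half a clock period here */
		ops->set_scl(true);		/* ... then let it float high again */
		/* delay ~half a clock period here */
	}
	return ops->get_sda() ? 0 : -1;		/* still stuck: report failure */
}

/* Simulated target that releases SDA after a few clock pulses. */
static int pulses;
static bool sim_get_sda(void) { return pulses >= 3; }
static void sim_set_scl(bool level) { if (level) pulses++; }

int main(void)
{
	const struct recovery_ops ops = { sim_get_sda, sim_set_scl };

	printf("recovery %s\n", scl_recover(&ops) ? "failed" : "succeeded");
	return 0;
}
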
diff --git a/drivers/i2c/busses/i2c-rzv2m.c b/drivers/i2c/busses/i2c-rzv2m.c
index 53762cc56d28..b0e9c0b62429 100644
--- a/drivers/i2c/busses/i2c-rzv2m.c
+++ b/drivers/i2c/busses/i2c-rzv2m.c
@@ -402,7 +402,7 @@ static const struct i2c_adapter_quirks rzv2m_i2c_quirks = {
.flags = I2C_AQ_NO_ZERO_LEN,
};
-static struct i2c_algorithm rzv2m_i2c_algo = {
+static const struct i2c_algorithm rzv2m_i2c_algo = {
.xfer = rzv2m_i2c_xfer,
.functionality = rzv2m_i2c_func,
};
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index efe29621b8d7..adfcee6c9fdc 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -24,6 +24,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
/* Transmit operation: */
/* */
@@ -409,7 +410,7 @@ static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id)
pd->sr |= sr; /* remember state */
dev_dbg(pd->dev, "i2c_isr 0x%02x 0x%02x %s %d %d!\n", sr, pd->sr,
- (pd->msg->flags & I2C_M_RD) ? "read" : "write",
+ str_read_write(pd->msg->flags & I2C_M_RD),
pd->pos, pd->msg->len);
/* Kick off TxDMA after preface was done */
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 87976e99e6d0..049b4d154c23 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -1395,6 +1395,11 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
ret = tegra_i2c_xfer_msg(i2c_dev, &msgs[i], MSG_END_CONTINUE);
if (ret)
break;
+
+ /* Validate message length before proceeding */
+ if (msgs[i].buf[0] == 0 || msgs[i].buf[0] > I2C_SMBUS_BLOCK_MAX)
+ break;
+
/* Set the msg length from first byte */
msgs[i].len += msgs[i].buf[0];
dev_dbg(i2c_dev->dev, "reading %d bytes\n", msgs[i].len);
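
With I2C_M_RECV_LEN the first received byte announces how many more bytes follow, so the check added above refuses counts of 0 or greater than I2C_SMBUS_BLOCK_MAX (32) before growing msgs[i].len. A tiny stand-alone sketch of that validation:

#include <stdint.h>
#include <stdio.h>

#define SMBUS_BLOCK_MAX 32	/* matches I2C_SMBUS_BLOCK_MAX */

/* Return the total message length, or -1 if the reported count is bogus. */
static int recv_len_total(const uint8_t *buf, int hdr_len)
{
	uint8_t count = buf[0];

	if (count == 0 || count > SMBUS_BLOCK_MAX)
		return -1;		/* device reported an invalid block size */
	return hdr_len + count;		/* length byte(s) already read + payload */
}

int main(void)
{
	uint8_t good[] = { 4, 0xde, 0xad, 0xbe, 0xef };
	uint8_t bad[]  = { 0 };

	printf("good: %d\n", recv_len_total(good, 1));
	printf("bad:  %d\n", recv_len_total(bad, 1));
	return 0;
}
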
diff --git a/drivers/i2c/busses/i2c-thunderx-pcidrv.c b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
index 143d012fa43e..3959f23fc440 100644
--- a/drivers/i2c/busses/i2c-thunderx-pcidrv.c
+++ b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
@@ -168,6 +168,9 @@ static int thunder_i2c_probe_pci(struct pci_dev *pdev,
i2c->roff.twsi_int = 0x1010;
i2c->roff.sw_twsi_ext = 0x1018;
i2c->roff.mode = 0x1038;
+ i2c->roff.block_ctl = 0x1048;
+ i2c->roff.block_sts = 0x1050;
+ i2c->roff.block_fifo = 0x1058;
i2c->dev = dev;
pci_set_drvdata(pdev, i2c);
@@ -175,7 +178,7 @@ static int thunder_i2c_probe_pci(struct pci_dev *pdev,
if (ret)
return ret;
- ret = pci_request_regions(pdev, DRV_NAME);
+ ret = pcim_request_all_regions(pdev, DRV_NAME);
if (ret)
return ret;
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
index 0f2ed181b266..a18eab0992a1 100644
--- a/drivers/i2c/busses/i2c-tiny-usb.c
+++ b/drivers/i2c/busses/i2c-tiny-usb.c
@@ -10,6 +10,7 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
/* include interfaces to usb layer */
@@ -71,7 +72,7 @@ static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
dev_dbg(&adapter->dev,
" %d: %s (flags %d) %d bytes to 0x%02x\n",
- i, pmsg->flags & I2C_M_RD ? "read" : "write",
+ i, str_read_write(pmsg->flags & I2C_M_RD),
pmsg->flags, pmsg->len, pmsg->addr);
/* and directly send the message */
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
index d877f5a1f579..ca0358e8f928 100644
--- a/drivers/i2c/busses/i2c-uniphier-f.c
+++ b/drivers/i2c/busses/i2c-uniphier-f.c
@@ -532,22 +532,16 @@ static int uniphier_fi2c_probe(struct platform_device *pdev)
if (of_property_read_u32(dev->of_node, "clock-frequency", &bus_speed))
bus_speed = I2C_MAX_STANDARD_MODE_FREQ;
- if (!bus_speed || bus_speed > I2C_MAX_FAST_MODE_FREQ) {
- dev_err(dev, "invalid clock-frequency %d\n", bus_speed);
- return -EINVAL;
- }
+ if (!bus_speed || bus_speed > I2C_MAX_FAST_MODE_FREQ)
+ return dev_err_probe(dev, -EINVAL, "invalid clock-frequency %d\n", bus_speed);
priv->clk = devm_clk_get_enabled(dev, NULL);
- if (IS_ERR(priv->clk)) {
- dev_err(dev, "failed to enable clock\n");
- return PTR_ERR(priv->clk);
- }
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(dev, PTR_ERR(priv->clk), "failed to enable clock\n");
clk_rate = clk_get_rate(priv->clk);
- if (!clk_rate) {
- dev_err(dev, "input clock rate should not be zero\n");
- return -EINVAL;
- }
+ if (!clk_rate)
+ return dev_err_probe(dev, -EINVAL, "input clock rate should not be zero\n");
priv->clk_cycle = clk_rate / bus_speed;
init_completion(&priv->comp);
@@ -565,10 +559,8 @@ static int uniphier_fi2c_probe(struct platform_device *pdev)
ret = devm_request_irq(dev, irq, uniphier_fi2c_interrupt, 0,
pdev->name, priv);
- if (ret) {
- dev_err(dev, "failed to request irq %d\n", irq);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request irq %d\n", irq);
return i2c_add_adapter(&priv->adap);
}
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
index b95d50d4d7db..9d49a3d5d612 100644
--- a/drivers/i2c/busses/i2c-uniphier.c
+++ b/drivers/i2c/busses/i2c-uniphier.c
@@ -327,22 +327,16 @@ static int uniphier_i2c_probe(struct platform_device *pdev)
if (of_property_read_u32(dev->of_node, "clock-frequency", &bus_speed))
bus_speed = I2C_MAX_STANDARD_MODE_FREQ;
- if (!bus_speed || bus_speed > I2C_MAX_FAST_MODE_FREQ) {
- dev_err(dev, "invalid clock-frequency %d\n", bus_speed);
- return -EINVAL;
- }
+ if (!bus_speed || bus_speed > I2C_MAX_FAST_MODE_FREQ)
+ return dev_err_probe(dev, -EINVAL, "invalid clock-frequency %d\n", bus_speed);
priv->clk = devm_clk_get_enabled(dev, NULL);
- if (IS_ERR(priv->clk)) {
- dev_err(dev, "failed to enable clock\n");
- return PTR_ERR(priv->clk);
- }
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(dev, PTR_ERR(priv->clk), "failed to enable clock\n");
clk_rate = clk_get_rate(priv->clk);
- if (!clk_rate) {
- dev_err(dev, "input clock rate should not be zero\n");
- return -EINVAL;
- }
+ if (!clk_rate)
+ return dev_err_probe(dev, -EINVAL, "input clock rate should not be zero\n");
priv->clk_cycle = clk_rate / bus_speed;
init_completion(&priv->comp);
@@ -359,10 +353,8 @@ static int uniphier_i2c_probe(struct platform_device *pdev)
ret = devm_request_irq(dev, irq, uniphier_i2c_interrupt, 0, pdev->name,
priv);
- if (ret) {
- dev_err(dev, "failed to request irq %d\n", irq);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request irq %d\n", irq);
return i2c_add_adapter(&priv->adap);
}
diff --git a/drivers/i2c/busses/i2c-via.c b/drivers/i2c/busses/i2c-via.c
index 7ed29992a97f..2c26a57883f2 100644
--- a/drivers/i2c/busses/i2c-via.c
+++ b/drivers/i2c/busses/i2c-via.c
@@ -89,10 +89,9 @@ static int vt586b_probe(struct pci_dev *dev, const struct pci_device_id *id)
u8 rev;
int res;
- if (pm_io_base) {
- dev_err(&dev->dev, "i2c-via: Will only support one host\n");
- return -ENODEV;
- }
+ if (pm_io_base)
+ return dev_err_probe(&dev->dev, -ENODEV,
+ "Will only support one host\n");
pci_read_config_byte(dev, PM_CFG_REVID, &rev);
@@ -113,10 +112,10 @@ static int vt586b_probe(struct pci_dev *dev, const struct pci_device_id *id)
pci_read_config_word(dev, base, &pm_io_base);
pm_io_base &= (0xff << 8);
- if (!request_region(I2C_DIR, IOSPACE, vt586b_driver.name)) {
- dev_err(&dev->dev, "IO 0x%x-0x%x already in use\n", I2C_DIR, I2C_DIR + IOSPACE);
- return -ENODEV;
- }
+ if (!request_region(I2C_DIR, IOSPACE, vt586b_driver.name))
+ return dev_err_probe(&dev->dev, -ENODEV,
+ "IO 0x%x-0x%x already in use\n",
+ I2C_DIR, I2C_DIR + IOSPACE);
outb(inb(I2C_DIR) & ~(I2C_SDA | I2C_SCL), I2C_DIR);
outb(inb(I2C_OUT) & ~(I2C_SDA | I2C_SCL), I2C_OUT);
diff --git a/drivers/i2c/busses/i2c-viai2c-wmt.c b/drivers/i2c/busses/i2c-viai2c-wmt.c
index 4eb740faf268..2cf3cc0165fb 100644
--- a/drivers/i2c/busses/i2c-viai2c-wmt.c
+++ b/drivers/i2c/busses/i2c-viai2c-wmt.c
@@ -44,16 +44,13 @@ static int wmt_i2c_reset_hardware(struct viai2c *i2c)
int err;
err = clk_prepare_enable(i2c->clk);
- if (err) {
- dev_err(i2c->dev, "failed to enable clock\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(i2c->dev, err, "failed to enable clock\n");
err = clk_set_rate(i2c->clk, 20000000);
if (err) {
- dev_err(i2c->dev, "failed to set clock = 20Mhz\n");
clk_disable_unprepare(i2c->clk);
- return err;
+ return dev_err_probe(i2c->dev, err, "failed to set clock = 20Mhz\n");
}
writew(0, i2c->base + VIAI2C_REG_CR);
@@ -121,10 +118,9 @@ static int wmt_i2c_probe(struct platform_device *pdev)
"failed to request irq %i\n", i2c->irq);
i2c->clk = of_clk_get(np, 0);
- if (IS_ERR(i2c->clk)) {
- dev_err(&pdev->dev, "unable to request clock\n");
- return PTR_ERR(i2c->clk);
- }
+ if (IS_ERR(i2c->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(i2c->clk),
+ "unable to request clock\n");
err = of_property_read_u32(np, "clock-frequency", &clk_rate);
if (!err && clk_rate == I2C_MAX_FAST_MODE_FREQ)
@@ -139,10 +135,8 @@ static int wmt_i2c_probe(struct platform_device *pdev)
adap->dev.of_node = pdev->dev.of_node;
err = wmt_i2c_reset_hardware(i2c);
- if (err) {
- dev_err(&pdev->dev, "error initializing hardware\n");
+ if (err)
return err;
- }
err = i2c_add_adapter(adap);
if (err)
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index 2cc7bba3b8bf..c58843609107 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -330,30 +330,27 @@ static int vt596_probe(struct pci_dev *pdev,
SMBHSTCFG = 0x84;
} else {
/* no matches at all */
- dev_err(&pdev->dev, "Cannot configure "
- "SMBus I/O Base address\n");
- return -ENODEV;
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "Cannot configure "
+ "SMBus I/O Base address\n");
}
}
vt596_smba &= 0xfff0;
- if (vt596_smba == 0) {
- dev_err(&pdev->dev, "SMBus base address "
- "uninitialized - upgrade BIOS or use "
- "force_addr=0xaddr\n");
- return -ENODEV;
- }
+ if (vt596_smba == 0)
+ return dev_err_probe(&pdev->dev, -ENODEV, "SMBus base address "
+ "uninitialized - upgrade BIOS or use "
+ "force_addr=0xaddr\n");
found:
error = acpi_check_region(vt596_smba, 8, vt596_driver.name);
if (error)
return -ENODEV;
- if (!request_region(vt596_smba, 8, vt596_driver.name)) {
- dev_err(&pdev->dev, "SMBus region 0x%x already in use!\n",
- vt596_smba);
- return -ENODEV;
- }
+ if (!request_region(vt596_smba, 8, vt596_driver.name))
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "SMBus region 0x%x already in use!\n",
+ vt596_smba);
pci_read_config_byte(pdev, SMBHSTCFG, &temp);
/* If force_addr is set, we program the new address here. Just to make
@@ -375,10 +372,10 @@ found:
pci_write_config_byte(pdev, SMBHSTCFG, temp | 0x01);
dev_info(&pdev->dev, "Enabling SMBus device\n");
} else {
- dev_err(&pdev->dev, "SMBUS: Error: Host SMBus "
- "controller not enabled! - upgrade BIOS or "
- "use force=1\n");
- error = -ENODEV;
+ error = dev_err_probe(&pdev->dev, -ENODEV,
+ "SMBUS: Error: Host SMBus "
+ "controller not enabled! - "
+ "upgrade BIOS or use force=1\n");
goto release_region;
}
}
diff --git a/drivers/i2c/busses/i2c-viperboard.c b/drivers/i2c/busses/i2c-viperboard.c
index 503e2f4d6f84..1bd602852e35 100644
--- a/drivers/i2c/busses/i2c-viperboard.c
+++ b/drivers/i2c/busses/i2c-viperboard.c
@@ -11,6 +11,7 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
@@ -278,7 +279,7 @@ static int vprbrd_i2c_xfer(struct i2c_adapter *i2c, struct i2c_msg *msgs,
dev_dbg(&i2c->dev,
" %d: %s (flags %d) %d bytes to 0x%02x\n",
- i, pmsg->flags & I2C_M_RD ? "read" : "write",
+ i, str_read_write(pmsg->flags & I2C_M_RD),
pmsg->flags, pmsg->len, pmsg->addr);
mutex_lock(&vb->lock);
@@ -384,15 +385,13 @@ static int vprbrd_i2c_probe(struct platform_device *pdev)
VPRBRD_USB_REQUEST_I2C_FREQ, VPRBRD_USB_TYPE_OUT,
0x0000, 0x0000, &vb_i2c->bus_freq_param, 1,
VPRBRD_USB_TIMEOUT_MS);
- if (ret != 1) {
- dev_err(&pdev->dev, "failure setting i2c_bus_freq to %d\n",
- i2c_bus_freq);
- return -EIO;
- }
+ if (ret != 1)
+ return dev_err_probe(&pdev->dev, -EIO,
+ "failure setting i2c_bus_freq to %d\n",
+ i2c_bus_freq);
} else {
- dev_err(&pdev->dev,
- "invalid i2c_bus_freq setting:%d\n", i2c_bus_freq);
- return -EIO;
+ return dev_err_probe(&pdev->dev, -EIO,
+ "invalid i2c_bus_freq setting:%d\n", i2c_bus_freq);
}
vb_i2c->i2c.dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-virtio.c b/drivers/i2c/busses/i2c-virtio.c
index 2a351f961b89..9b05ff53d3d7 100644
--- a/drivers/i2c/busses/i2c-virtio.c
+++ b/drivers/i2c/busses/i2c-virtio.c
@@ -192,10 +192,9 @@ static int virtio_i2c_probe(struct virtio_device *vdev)
struct virtio_i2c *vi;
int ret;
- if (!virtio_has_feature(vdev, VIRTIO_I2C_F_ZERO_LENGTH_REQUEST)) {
- dev_err(&vdev->dev, "Zero-length request feature is mandatory\n");
- return -EINVAL;
- }
+ if (!virtio_has_feature(vdev, VIRTIO_I2C_F_ZERO_LENGTH_REQUEST))
+ return dev_err_probe(&vdev->dev, -EINVAL,
+ "Zero-length request feature is mandatory\n");
vi = devm_kzalloc(&vdev->dev, sizeof(*vi), GFP_KERNEL);
if (!vi)
diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
index 663fe5604dd6..b29dec66b2c3 100644
--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
+++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
@@ -101,8 +101,6 @@ struct slimpro_i2c_dev {
struct completion rd_complete;
u8 dma_buffer[I2C_SMBUS_BLOCK_MAX + 1]; /* dma_buffer[0] is used for length */
u32 *resp_msg;
- phys_addr_t comm_base_addr;
- void *pcc_comm_addr;
};
#define to_slimpro_i2c_dev(cl) \
@@ -148,7 +146,8 @@ static void slimpro_i2c_rx_cb(struct mbox_client *cl, void *mssg)
static void slimpro_i2c_pcc_rx_cb(struct mbox_client *cl, void *msg)
{
struct slimpro_i2c_dev *ctx = to_slimpro_i2c_dev(cl);
- struct acpi_pcct_shared_memory *generic_comm_base = ctx->pcc_comm_addr;
+ struct acpi_pcct_shared_memory __iomem *generic_comm_base =
+ ctx->pcc_chan->shmem;
/* Check if platform sends interrupt */
if (!xgene_word_tst_and_clr(&generic_comm_base->status,
@@ -169,7 +168,8 @@ static void slimpro_i2c_pcc_rx_cb(struct mbox_client *cl, void *msg)
static void slimpro_i2c_pcc_tx_prepare(struct slimpro_i2c_dev *ctx, u32 *msg)
{
- struct acpi_pcct_shared_memory *generic_comm_base = ctx->pcc_comm_addr;
+ struct acpi_pcct_shared_memory __iomem *generic_comm_base =
+ ctx->pcc_chan->shmem;
u32 *ptr = (void *)(generic_comm_base + 1);
u16 status;
int i;
@@ -457,22 +457,18 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
cl->tx_block = true;
cl->rx_callback = slimpro_i2c_rx_cb;
ctx->mbox_chan = mbox_request_channel(cl, MAILBOX_I2C_INDEX);
- if (IS_ERR(ctx->mbox_chan)) {
- dev_err(&pdev->dev, "i2c mailbox channel request failed\n");
- return PTR_ERR(ctx->mbox_chan);
- }
+ if (IS_ERR(ctx->mbox_chan))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ctx->mbox_chan),
+ "i2c mailbox channel request failed\n");
} else {
struct pcc_mbox_chan *pcc_chan;
const struct acpi_device_id *acpi_id;
- int version = XGENE_SLIMPRO_I2C_V1;
acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
&pdev->dev);
if (!acpi_id)
return -EINVAL;
- version = (int)acpi_id->driver_data;
-
if (device_property_read_u32(&pdev->dev, "pcc-channel",
&ctx->mbox_idx))
ctx->mbox_idx = MAILBOX_I2C_INDEX;
@@ -480,48 +476,19 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
cl->tx_block = false;
cl->rx_callback = slimpro_i2c_pcc_rx_cb;
pcc_chan = pcc_mbox_request_channel(cl, ctx->mbox_idx);
- if (IS_ERR(pcc_chan)) {
- dev_err(&pdev->dev, "PCC mailbox channel request failed\n");
- return PTR_ERR(pcc_chan);
- }
+ if (IS_ERR(pcc_chan))
+ return dev_err_probe(&pdev->dev, PTR_ERR(pcc_chan),
+ "PCC mailbox channel request failed\n");
ctx->pcc_chan = pcc_chan;
ctx->mbox_chan = pcc_chan->mchan;
if (!ctx->mbox_chan->mbox->txdone_irq) {
- dev_err(&pdev->dev, "PCC IRQ not supported\n");
- rc = -ENOENT;
+ rc = dev_err_probe(&pdev->dev, -ENOENT,
+ "PCC IRQ not supported\n");
goto mbox_err;
}
- /*
- * This is the shared communication region
- * for the OS and Platform to communicate over.
- */
- ctx->comm_base_addr = pcc_chan->shmem_base_addr;
- if (ctx->comm_base_addr) {
- if (version == XGENE_SLIMPRO_I2C_V2)
- ctx->pcc_comm_addr = memremap(
- ctx->comm_base_addr,
- pcc_chan->shmem_size,
- MEMREMAP_WT);
- else
- ctx->pcc_comm_addr = memremap(
- ctx->comm_base_addr,
- pcc_chan->shmem_size,
- MEMREMAP_WB);
- } else {
- dev_err(&pdev->dev, "Failed to get PCC comm region\n");
- rc = -ENOENT;
- goto mbox_err;
- }
-
- if (!ctx->pcc_comm_addr) {
- dev_err(&pdev->dev,
- "Failed to ioremap PCC comm region\n");
- rc = -ENOMEM;
- goto mbox_err;
- }
}
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc)
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index dc1e46d834dc..6bc1575cea6c 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -1489,7 +1489,7 @@ static int xiic_i2c_probe(struct platform_device *pdev)
pdev->name, i2c);
if (ret < 0) {
- dev_err(&pdev->dev, "Cannot claim IRQ\n");
+ dev_err_probe(&pdev->dev, ret, "Cannot claim IRQ\n");
goto err_pm_disable;
}
@@ -1510,7 +1510,7 @@ static int xiic_i2c_probe(struct platform_device *pdev)
ret = xiic_reinit(i2c);
if (ret < 0) {
- dev_err(&pdev->dev, "Cannot xiic_reinit\n");
+ dev_err_probe(&pdev->dev, ret, "Cannot xiic_reinit\n");
goto err_pm_disable;
}
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index 4d6abd7e92ce..06cf221557f2 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -500,10 +500,8 @@ static int scx200_probe(struct platform_device *pdev)
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!res) {
- dev_err(&pdev->dev, "can't fetch device resource info\n");
- return -ENODEV;
- }
+ if (!res)
+ return dev_err_probe(&pdev->dev, -ENODEV, "can't fetch device resource info\n");
iface = scx200_create_dev("CS5535", res->start, 0, &pdev->dev);
if (!iface)
diff --git a/drivers/i2c/i2c-atr.c b/drivers/i2c/i2c-atr.c
index 783fb8df2ebe..be7d6d41e0b2 100644
--- a/drivers/i2c/i2c-atr.c
+++ b/drivers/i2c/i2c-atr.c
@@ -16,32 +16,65 @@
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/lockdep.h>
#define ATR_MAX_ADAPTERS 100 /* Just a sanity limit */
#define ATR_MAX_SYMLINK_LEN 11 /* Longest name is 10 chars: "channel-99" */
/**
- * struct i2c_atr_alias_pair - Holds the alias assigned to a client.
+ * struct i2c_atr_alias_pair - Holds the alias assigned to a client address.
* @node: List node
- * @client: Pointer to the client on the child bus
+ * @addr: Address of the client on the child bus.
* @alias: I2C alias address assigned by the driver.
* This is the address that will be used to issue I2C transactions
* on the parent (physical) bus.
+ * @fixed: Alias pair cannot be replaced during dynamic address attachment.
+ * This flag is necessary for situations where a single I2C transaction
+ * contains more distinct target addresses than the ATR channel can handle.
+ * It marks addresses that have already been attached to an alias so
+ * that their alias pair is not evicted by a subsequent address in the same
+ * transaction.
+ *
*/
struct i2c_atr_alias_pair {
struct list_head node;
- const struct i2c_client *client;
+ bool fixed;
+ u16 addr;
u16 alias;
};
/**
+ * struct i2c_atr_alias_pool - Pool of client aliases available for an ATR.
+ * @size: Total number of aliases
+ * @shared: Indicates if this alias pool is shared by multiple channels
+ *
+ * @lock: Lock protecting @aliases and @use_mask
+ * @aliases: Array of aliases, must hold exactly @size elements
+ * @use_mask: Mask of used aliases
+ */
+struct i2c_atr_alias_pool {
+ size_t size;
+ bool shared;
+
+ /* Protects aliases and use_mask */
+ spinlock_t lock;
+ u16 *aliases;
+ unsigned long *use_mask;
+};
+
+/**
* struct i2c_atr_chan - Data for a channel.
* @adap: The &struct i2c_adapter for the channel
* @atr: The parent I2C ATR
* @chan_id: The ID of this channel
- * @alias_list: List of @struct i2c_atr_alias_pair containing the
+ * @alias_pairs_lock: Mutex protecting @alias_pairs
+ * @alias_pairs_lock_key: Lock key for @alias_pairs_lock
+ * @alias_pairs: List of @struct i2c_atr_alias_pair containing the
* assigned aliases
+ * @alias_pool: Pool of available client aliases
+ *
* @orig_addrs_lock: Mutex protecting @orig_addrs
+ * @orig_addrs_lock_key: Lock key for @orig_addrs_lock
* @orig_addrs: Buffer used to store the original addresses during transmit
* @orig_addrs_size: Size of @orig_addrs
*/
@@ -50,10 +83,15 @@ struct i2c_atr_chan {
struct i2c_atr *atr;
u32 chan_id;
- struct list_head alias_list;
+ /* Lock alias_pairs during attach/detach */
+ struct mutex alias_pairs_lock;
+ struct lock_class_key alias_pairs_lock_key;
+ struct list_head alias_pairs;
+ struct i2c_atr_alias_pool *alias_pool;
/* Lock orig_addrs during xfer */
struct mutex orig_addrs_lock;
+ struct lock_class_key orig_addrs_lock_key;
u16 *orig_addrs;
unsigned int orig_addrs_size;
};
@@ -66,11 +104,10 @@ struct i2c_atr_chan {
* @priv: Private driver data, set with i2c_atr_set_driver_data()
* @algo: The &struct i2c_algorithm for adapters
* @lock: Lock for the I2C bus segment (see &struct i2c_lock_operations)
+ * @lock_key: Lock key for @lock
* @max_adapters: Maximum number of adapters this I2C ATR can have
- * @num_aliases: Number of aliases in the aliases array
- * @aliases: The aliases array
- * @alias_mask_lock: Lock protecting alias_use_mask
- * @alias_use_mask: Bitmask for used aliases in aliases array
+ * @flags: Flags for ATR
+ * @alias_pool: Optional common pool of available client aliases
* @i2c_nb: Notifier for remote client add & del events
* @adapter: Array of adapters
*/
@@ -84,27 +121,135 @@ struct i2c_atr {
struct i2c_algorithm algo;
/* lock for the I2C bus segment (see struct i2c_lock_operations) */
struct mutex lock;
+ struct lock_class_key lock_key;
int max_adapters;
+ u32 flags;
- size_t num_aliases;
- const u16 *aliases;
- /* Protects alias_use_mask */
- spinlock_t alias_mask_lock;
- unsigned long *alias_use_mask;
+ struct i2c_atr_alias_pool *alias_pool;
struct notifier_block i2c_nb;
struct i2c_adapter *adapter[] __counted_by(max_adapters);
};
+static struct i2c_atr_alias_pool *i2c_atr_alloc_alias_pool(size_t num_aliases, bool shared)
+{
+ struct i2c_atr_alias_pool *alias_pool;
+ int ret;
+
+ alias_pool = kzalloc(sizeof(*alias_pool), GFP_KERNEL);
+ if (!alias_pool)
+ return ERR_PTR(-ENOMEM);
+
+ alias_pool->size = num_aliases;
+
+ alias_pool->aliases = kcalloc(num_aliases, sizeof(*alias_pool->aliases), GFP_KERNEL);
+ if (!alias_pool->aliases) {
+ ret = -ENOMEM;
+ goto err_free_alias_pool;
+ }
+
+ alias_pool->use_mask = bitmap_zalloc(num_aliases, GFP_KERNEL);
+ if (!alias_pool->use_mask) {
+ ret = -ENOMEM;
+ goto err_free_aliases;
+ }
+
+ alias_pool->shared = shared;
+
+ spin_lock_init(&alias_pool->lock);
+
+ return alias_pool;
+
+err_free_aliases:
+ kfree(alias_pool->aliases);
+err_free_alias_pool:
+ kfree(alias_pool);
+ return ERR_PTR(ret);
+}
+
+static void i2c_atr_free_alias_pool(struct i2c_atr_alias_pool *alias_pool)
+{
+ bitmap_free(alias_pool->use_mask);
+ kfree(alias_pool->aliases);
+ kfree(alias_pool);
+}
+
+/* Must be called with alias_pairs_lock held */
+static struct i2c_atr_alias_pair *i2c_atr_create_c2a(struct i2c_atr_chan *chan,
+ u16 alias, u16 addr)
+{
+ struct i2c_atr_alias_pair *c2a;
+
+ lockdep_assert_held(&chan->alias_pairs_lock);
+
+ c2a = kzalloc(sizeof(*c2a), GFP_KERNEL);
+ if (!c2a)
+ return NULL;
+
+ c2a->addr = addr;
+ c2a->alias = alias;
+
+ list_add(&c2a->node, &chan->alias_pairs);
+
+ return c2a;
+}
+
+/* Must be called with alias_pairs_lock held */
+static void i2c_atr_destroy_c2a(struct i2c_atr_alias_pair **pc2a)
+{
+ list_del(&(*pc2a)->node);
+ kfree(*pc2a);
+ *pc2a = NULL;
+}
+
+static int i2c_atr_reserve_alias(struct i2c_atr_alias_pool *alias_pool)
+{
+ unsigned long idx;
+ u16 alias;
+
+ spin_lock(&alias_pool->lock);
+
+ idx = find_first_zero_bit(alias_pool->use_mask, alias_pool->size);
+ if (idx >= alias_pool->size) {
+ spin_unlock(&alias_pool->lock);
+ return -EBUSY;
+ }
+
+ set_bit(idx, alias_pool->use_mask);
+
+ alias = alias_pool->aliases[idx];
+
+ spin_unlock(&alias_pool->lock);
+ return alias;
+}
+
+static void i2c_atr_release_alias(struct i2c_atr_alias_pool *alias_pool, u16 alias)
+{
+ unsigned int idx;
+
+ spin_lock(&alias_pool->lock);
+
+ for (idx = 0; idx < alias_pool->size; ++idx) {
+ if (alias_pool->aliases[idx] == alias) {
+ clear_bit(idx, alias_pool->use_mask);
+ spin_unlock(&alias_pool->lock);
+ return;
+ }
+ }
+
+ spin_unlock(&alias_pool->lock);
+}
+
static struct i2c_atr_alias_pair *
-i2c_atr_find_mapping_by_client(const struct list_head *list,
- const struct i2c_client *client)
+i2c_atr_find_mapping_by_addr(struct i2c_atr_chan *chan, u16 addr)
{
struct i2c_atr_alias_pair *c2a;
- list_for_each_entry(c2a, list, node) {
- if (c2a->client == client)
+ lockdep_assert_held(&chan->alias_pairs_lock);
+
+ list_for_each_entry(c2a, &chan->alias_pairs, node) {
+ if (c2a->addr == addr)
return c2a;
}
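
The per-channel alias handling above draws from a struct i2c_atr_alias_pool, and reserving an alias amounts to finding the first clear bit in use_mask, setting it, and returning the alias stored at that index. A compact user-space model of the reserve/release cycle, using plain flags instead of the kernel bitmap helpers:

#include <stdint.h>
#include <stdio.h>

struct alias_pool {
	size_t size;
	uint16_t aliases[8];
	unsigned char used[8];	/* one flag per alias; the kernel uses a bitmap */
};

static int pool_reserve(struct alias_pool *p)
{
	for (size_t i = 0; i < p->size; i++) {
		if (!p->used[i]) {
			p->used[i] = 1;
			return p->aliases[i];	/* hand out this alias */
		}
	}
	return -1;				/* like -EBUSY: pool exhausted */
}

static void pool_release(struct alias_pool *p, uint16_t alias)
{
	for (size_t i = 0; i < p->size; i++) {
		if (p->aliases[i] == alias) {
			p->used[i] = 0;		/* alias can be handed out again */
			return;
		}
	}
}

int main(void)
{
	struct alias_pool pool = { .size = 3, .aliases = { 0x68, 0x69, 0x6a } };
	int a = pool_reserve(&pool), b = pool_reserve(&pool);

	printf("got 0x%02x and 0x%02x\n", a, b);
	pool_release(&pool, a);
	printf("after release: 0x%02x\n", pool_reserve(&pool));
	return 0;
}
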
@@ -112,18 +257,107 @@ i2c_atr_find_mapping_by_client(const struct list_head *list,
}
static struct i2c_atr_alias_pair *
-i2c_atr_find_mapping_by_addr(const struct list_head *list, u16 phys_addr)
+i2c_atr_create_mapping_by_addr(struct i2c_atr_chan *chan, u16 addr)
{
+ struct i2c_atr *atr = chan->atr;
struct i2c_atr_alias_pair *c2a;
+ u16 alias;
+ int ret;
- list_for_each_entry(c2a, list, node) {
- if (c2a->client->addr == phys_addr)
- return c2a;
+ lockdep_assert_held(&chan->alias_pairs_lock);
+
+ ret = i2c_atr_reserve_alias(chan->alias_pool);
+ if (ret < 0)
+ return NULL;
+
+ alias = ret;
+
+ c2a = i2c_atr_create_c2a(chan, alias, addr);
+ if (!c2a)
+ goto err_release_alias;
+
+ ret = atr->ops->attach_addr(atr, chan->chan_id, c2a->addr, c2a->alias);
+ if (ret) {
+ dev_err(atr->dev, "failed to attach 0x%02x on channel %d: err %d\n",
+ addr, chan->chan_id, ret);
+ goto err_del_c2a;
}
+ return c2a;
+
+err_del_c2a:
+ i2c_atr_destroy_c2a(&c2a);
+err_release_alias:
+ i2c_atr_release_alias(chan->alias_pool, alias);
return NULL;
}
+static struct i2c_atr_alias_pair *
+i2c_atr_replace_mapping_by_addr(struct i2c_atr_chan *chan, u16 addr)
+{
+ struct i2c_atr *atr = chan->atr;
+ struct i2c_atr_alias_pair *c2a;
+ struct list_head *alias_pairs;
+ bool found = false;
+ u16 alias;
+ int ret;
+
+ lockdep_assert_held(&chan->alias_pairs_lock);
+
+ alias_pairs = &chan->alias_pairs;
+
+ if (unlikely(list_empty(alias_pairs)))
+ return NULL;
+
+ list_for_each_entry_reverse(c2a, alias_pairs, node) {
+ if (!c2a->fixed) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return NULL;
+
+ atr->ops->detach_addr(atr, chan->chan_id, c2a->addr);
+ c2a->addr = addr;
+
+ list_move(&c2a->node, alias_pairs);
+
+ alias = c2a->alias;
+
+ ret = atr->ops->attach_addr(atr, chan->chan_id, c2a->addr, c2a->alias);
+ if (ret) {
+ dev_err(atr->dev, "failed to attach 0x%02x on channel %d: err %d\n",
+ addr, chan->chan_id, ret);
+ i2c_atr_destroy_c2a(&c2a);
+ i2c_atr_release_alias(chan->alias_pool, alias);
+ return NULL;
+ }
+
+ return c2a;
+}
+
+static struct i2c_atr_alias_pair *
+i2c_atr_get_mapping_by_addr(struct i2c_atr_chan *chan, u16 addr)
+{
+ struct i2c_atr *atr = chan->atr;
+ struct i2c_atr_alias_pair *c2a;
+
+ c2a = i2c_atr_find_mapping_by_addr(chan, addr);
+ if (c2a)
+ return c2a;
+
+ if (atr->flags & I2C_ATR_F_STATIC)
+ return NULL;
+
+ c2a = i2c_atr_create_mapping_by_addr(chan, addr);
+ if (c2a)
+ return c2a;
+
+ return i2c_atr_replace_mapping_by_addr(chan, addr);
+}
+
/*
* Replace all message addresses with their aliases, saving the original
* addresses.
@@ -136,7 +370,7 @@ static int i2c_atr_map_msgs(struct i2c_atr_chan *chan, struct i2c_msg *msgs,
{
struct i2c_atr *atr = chan->atr;
static struct i2c_atr_alias_pair *c2a;
- int i;
+ int i, ret = 0;
/* Ensure we have enough room to save the original addresses */
if (unlikely(chan->orig_addrs_size < num)) {
@@ -152,25 +386,36 @@ static int i2c_atr_map_msgs(struct i2c_atr_chan *chan, struct i2c_msg *msgs,
chan->orig_addrs_size = num;
}
+ mutex_lock(&chan->alias_pairs_lock);
+
for (i = 0; i < num; i++) {
chan->orig_addrs[i] = msgs[i].addr;
- c2a = i2c_atr_find_mapping_by_addr(&chan->alias_list,
- msgs[i].addr);
+ c2a = i2c_atr_get_mapping_by_addr(chan, msgs[i].addr);
+
if (!c2a) {
+ if (atr->flags & I2C_ATR_F_PASSTHROUGH)
+ continue;
+
dev_err(atr->dev, "client 0x%02x not mapped!\n",
msgs[i].addr);
while (i--)
msgs[i].addr = chan->orig_addrs[i];
- return -ENXIO;
+ ret = -ENXIO;
+ goto out_unlock;
}
+ // Prevent c2a from being overwritten by another client in this transaction
+ c2a->fixed = true;
+
msgs[i].addr = c2a->alias;
}
- return 0;
+out_unlock:
+ mutex_unlock(&chan->alias_pairs_lock);
+ return ret;
}
/*
@@ -183,10 +428,24 @@ static int i2c_atr_map_msgs(struct i2c_atr_chan *chan, struct i2c_msg *msgs,
static void i2c_atr_unmap_msgs(struct i2c_atr_chan *chan, struct i2c_msg *msgs,
int num)
{
+ struct i2c_atr_alias_pair *c2a;
int i;
for (i = 0; i < num; i++)
msgs[i].addr = chan->orig_addrs[i];
+
+ mutex_lock(&chan->alias_pairs_lock);
+
+ if (unlikely(list_empty(&chan->alias_pairs)))
+ goto out_unlock;
+
+ // unfix c2a entries so that subsequent transfers can reuse their aliases
+ list_for_each_entry(c2a, &chan->alias_pairs, node) {
+ c2a->fixed = false;
+ }
+
+out_unlock:
+ mutex_unlock(&chan->alias_pairs_lock);
}
static int i2c_atr_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
@@ -224,14 +483,23 @@ static int i2c_atr_smbus_xfer(struct i2c_adapter *adap, u16 addr,
struct i2c_atr *atr = chan->atr;
struct i2c_adapter *parent = atr->parent;
struct i2c_atr_alias_pair *c2a;
+ u16 alias;
- c2a = i2c_atr_find_mapping_by_addr(&chan->alias_list, addr);
- if (!c2a) {
+ mutex_lock(&chan->alias_pairs_lock);
+
+ c2a = i2c_atr_get_mapping_by_addr(chan, addr);
+
+ if (!c2a && !(atr->flags & I2C_ATR_F_PASSTHROUGH)) {
dev_err(atr->dev, "client 0x%02x not mapped!\n", addr);
+ mutex_unlock(&chan->alias_pairs_lock);
return -ENXIO;
}
- return i2c_smbus_xfer(parent, c2a->alias, flags, read_write, command,
+ alias = c2a ? c2a->alias : addr;
+
+ mutex_unlock(&chan->alias_pairs_lock);
+
+ return i2c_smbus_xfer(parent, alias, flags, read_write, command,
size, data);
}
@@ -273,112 +541,60 @@ static const struct i2c_lock_operations i2c_atr_lock_ops = {
.unlock_bus = i2c_atr_unlock_bus,
};
-static int i2c_atr_reserve_alias(struct i2c_atr *atr)
-{
- unsigned long idx;
-
- spin_lock(&atr->alias_mask_lock);
-
- idx = find_first_zero_bit(atr->alias_use_mask, atr->num_aliases);
- if (idx >= atr->num_aliases) {
- spin_unlock(&atr->alias_mask_lock);
- dev_err(atr->dev, "failed to find a free alias\n");
- return -EBUSY;
- }
-
- set_bit(idx, atr->alias_use_mask);
-
- spin_unlock(&atr->alias_mask_lock);
-
- return atr->aliases[idx];
-}
-
-static void i2c_atr_release_alias(struct i2c_atr *atr, u16 alias)
-{
- unsigned int idx;
-
- spin_lock(&atr->alias_mask_lock);
-
- for (idx = 0; idx < atr->num_aliases; ++idx) {
- if (atr->aliases[idx] == alias) {
- clear_bit(idx, atr->alias_use_mask);
- spin_unlock(&atr->alias_mask_lock);
- return;
- }
- }
-
- spin_unlock(&atr->alias_mask_lock);
-
- /* This should never happen */
- dev_warn(atr->dev, "Unable to find mapped alias\n");
-}
-
-static int i2c_atr_attach_client(struct i2c_adapter *adapter,
- const struct i2c_client *client)
+static int i2c_atr_attach_addr(struct i2c_adapter *adapter,
+ u16 addr)
{
struct i2c_atr_chan *chan = adapter->algo_data;
struct i2c_atr *atr = chan->atr;
struct i2c_atr_alias_pair *c2a;
- u16 alias;
- int ret;
+ int ret = 0;
- ret = i2c_atr_reserve_alias(atr);
- if (ret < 0)
- return ret;
+ mutex_lock(&chan->alias_pairs_lock);
- alias = ret;
+ c2a = i2c_atr_create_mapping_by_addr(chan, addr);
+ if (!c2a && !(atr->flags & I2C_ATR_F_STATIC))
+ c2a = i2c_atr_replace_mapping_by_addr(chan, addr);
- c2a = kzalloc(sizeof(*c2a), GFP_KERNEL);
if (!c2a) {
- ret = -ENOMEM;
- goto err_release_alias;
+ dev_err(atr->dev, "failed to find a free alias\n");
+ ret = -EBUSY;
+ goto out_unlock;
}
- ret = atr->ops->attach_client(atr, chan->chan_id, client, alias);
- if (ret)
- goto err_free;
-
- dev_dbg(atr->dev, "chan%u: client 0x%02x mapped at alias 0x%02x (%s)\n",
- chan->chan_id, client->addr, alias, client->name);
-
- c2a->client = client;
- c2a->alias = alias;
- list_add(&c2a->node, &chan->alias_list);
-
- return 0;
-
-err_free:
- kfree(c2a);
-err_release_alias:
- i2c_atr_release_alias(atr, alias);
+ dev_dbg(atr->dev, "chan%u: using alias 0x%02x for addr 0x%02x\n",
+ chan->chan_id, c2a->alias, addr);
+out_unlock:
+ mutex_unlock(&chan->alias_pairs_lock);
return ret;
}
-static void i2c_atr_detach_client(struct i2c_adapter *adapter,
- const struct i2c_client *client)
+static void i2c_atr_detach_addr(struct i2c_adapter *adapter,
+ u16 addr)
{
struct i2c_atr_chan *chan = adapter->algo_data;
struct i2c_atr *atr = chan->atr;
struct i2c_atr_alias_pair *c2a;
- atr->ops->detach_client(atr, chan->chan_id, client);
+ atr->ops->detach_addr(atr, chan->chan_id, addr);
+
+ mutex_lock(&chan->alias_pairs_lock);
- c2a = i2c_atr_find_mapping_by_client(&chan->alias_list, client);
+ c2a = i2c_atr_find_mapping_by_addr(chan, addr);
if (!c2a) {
- /* This should never happen */
- dev_warn(atr->dev, "Unable to find address mapping\n");
+ mutex_unlock(&chan->alias_pairs_lock);
return;
}
- i2c_atr_release_alias(atr, c2a->alias);
+ i2c_atr_release_alias(chan->alias_pool, c2a->alias);
dev_dbg(atr->dev,
- "chan%u: client 0x%02x unmapped from alias 0x%02x (%s)\n",
- chan->chan_id, client->addr, c2a->alias, client->name);
+ "chan%u: detached alias 0x%02x from addr 0x%02x\n",
+ chan->chan_id, c2a->alias, addr);
- list_del(&c2a->node);
- kfree(c2a);
+ i2c_atr_destroy_c2a(&c2a);
+
+ mutex_unlock(&chan->alias_pairs_lock);
}
static int i2c_atr_bus_notifier_call(struct notifier_block *nb,
@@ -405,7 +621,7 @@ static int i2c_atr_bus_notifier_call(struct notifier_block *nb,
switch (event) {
case BUS_NOTIFY_ADD_DEVICE:
- ret = i2c_atr_attach_client(client->adapter, client);
+ ret = i2c_atr_attach_addr(client->adapter, client->addr);
if (ret)
dev_err(atr->dev,
"Failed to attach remote client '%s': %d\n",
@@ -413,7 +629,7 @@ static int i2c_atr_bus_notifier_call(struct notifier_block *nb,
break;
case BUS_NOTIFY_REMOVED_DEVICE:
- i2c_atr_detach_client(client->adapter, client);
+ i2c_atr_detach_addr(client->adapter, client->addr);
break;
default:
@@ -425,29 +641,43 @@ static int i2c_atr_bus_notifier_call(struct notifier_block *nb,
static int i2c_atr_parse_alias_pool(struct i2c_atr *atr)
{
+ struct i2c_atr_alias_pool *alias_pool;
struct device *dev = atr->dev;
- unsigned long *alias_use_mask;
size_t num_aliases;
unsigned int i;
u32 *aliases32;
- u16 *aliases16;
int ret;
- ret = fwnode_property_count_u32(dev_fwnode(dev), "i2c-alias-pool");
- if (ret < 0) {
- dev_err(dev, "Failed to count 'i2c-alias-pool' property: %d\n",
- ret);
+ if (!fwnode_property_present(dev_fwnode(dev), "i2c-alias-pool")) {
+ num_aliases = 0;
+ } else {
+ ret = fwnode_property_count_u32(dev_fwnode(dev), "i2c-alias-pool");
+ if (ret < 0) {
+ dev_err(dev, "Failed to count 'i2c-alias-pool' property: %d\n",
+ ret);
+ return ret;
+ }
+
+ num_aliases = ret;
+ }
+
+ alias_pool = i2c_atr_alloc_alias_pool(num_aliases, true);
+ if (IS_ERR(alias_pool)) {
+ ret = PTR_ERR(alias_pool);
+ dev_err(dev, "Failed to allocate alias pool, err %d\n", ret);
return ret;
}
- num_aliases = ret;
+ atr->alias_pool = alias_pool;
- if (!num_aliases)
+ if (!alias_pool->size)
return 0;
aliases32 = kcalloc(num_aliases, sizeof(*aliases32), GFP_KERNEL);
- if (!aliases32)
- return -ENOMEM;
+ if (!aliases32) {
+ ret = -ENOMEM;
+ goto err_free_alias_pool;
+ }
ret = fwnode_property_read_u32_array(dev_fwnode(dev), "i2c-alias-pool",
aliases32, num_aliases);
@@ -457,48 +687,33 @@ static int i2c_atr_parse_alias_pool(struct i2c_atr *atr)
goto err_free_aliases32;
}
- aliases16 = kcalloc(num_aliases, sizeof(*aliases16), GFP_KERNEL);
- if (!aliases16) {
- ret = -ENOMEM;
- goto err_free_aliases32;
- }
-
for (i = 0; i < num_aliases; i++) {
if (!(aliases32[i] & 0xffff0000)) {
- aliases16[i] = aliases32[i];
+ alias_pool->aliases[i] = aliases32[i];
continue;
}
dev_err(dev, "Failed to parse 'i2c-alias-pool' property: I2C flags are not supported\n");
ret = -EINVAL;
- goto err_free_aliases16;
- }
-
- alias_use_mask = bitmap_zalloc(num_aliases, GFP_KERNEL);
- if (!alias_use_mask) {
- ret = -ENOMEM;
- goto err_free_aliases16;
+ goto err_free_aliases32;
}
kfree(aliases32);
- atr->num_aliases = num_aliases;
- atr->aliases = aliases16;
- atr->alias_use_mask = alias_use_mask;
-
- dev_dbg(dev, "i2c-alias-pool has %zu aliases", atr->num_aliases);
+ dev_dbg(dev, "i2c-alias-pool has %zu aliases\n", alias_pool->size);
return 0;
-err_free_aliases16:
- kfree(aliases16);
err_free_aliases32:
kfree(aliases32);
+err_free_alias_pool:
+ i2c_atr_free_alias_pool(alias_pool);
return ret;
}
struct i2c_atr *i2c_atr_new(struct i2c_adapter *parent, struct device *dev,
- const struct i2c_atr_ops *ops, int max_adapters)
+ const struct i2c_atr_ops *ops, int max_adapters,
+ u32 flags)
{
struct i2c_atr *atr;
int ret;
@@ -506,20 +721,21 @@ struct i2c_atr *i2c_atr_new(struct i2c_adapter *parent, struct device *dev,
if (max_adapters > ATR_MAX_ADAPTERS)
return ERR_PTR(-EINVAL);
- if (!ops || !ops->attach_client || !ops->detach_client)
+ if (!ops || !ops->attach_addr || !ops->detach_addr)
return ERR_PTR(-EINVAL);
atr = kzalloc(struct_size(atr, adapter, max_adapters), GFP_KERNEL);
if (!atr)
return ERR_PTR(-ENOMEM);
- mutex_init(&atr->lock);
- spin_lock_init(&atr->alias_mask_lock);
+ lockdep_register_key(&atr->lock_key);
+ mutex_init_with_key(&atr->lock, &atr->lock_key);
atr->parent = parent;
atr->dev = dev;
atr->ops = ops;
atr->max_adapters = max_adapters;
+ atr->flags = flags;
if (parent->algo->master_xfer)
atr->algo.master_xfer = i2c_atr_master_xfer;
@@ -534,15 +750,15 @@ struct i2c_atr *i2c_atr_new(struct i2c_adapter *parent, struct device *dev,
atr->i2c_nb.notifier_call = i2c_atr_bus_notifier_call;
ret = bus_register_notifier(&i2c_bus_type, &atr->i2c_nb);
if (ret)
- goto err_free_aliases;
+ goto err_free_alias_pool;
return atr;
-err_free_aliases:
- bitmap_free(atr->alias_use_mask);
- kfree(atr->aliases);
+err_free_alias_pool:
+ i2c_atr_free_alias_pool(atr->alias_pool);
err_destroy_mutex:
mutex_destroy(&atr->lock);
+ lockdep_unregister_key(&atr->lock_key);
kfree(atr);
return ERR_PTR(ret);
@@ -557,22 +773,22 @@ void i2c_atr_delete(struct i2c_atr *atr)
WARN_ON(atr->adapter[i]);
bus_unregister_notifier(&i2c_bus_type, &atr->i2c_nb);
- bitmap_free(atr->alias_use_mask);
- kfree(atr->aliases);
+ i2c_atr_free_alias_pool(atr->alias_pool);
mutex_destroy(&atr->lock);
+ lockdep_unregister_key(&atr->lock_key);
kfree(atr);
}
EXPORT_SYMBOL_NS_GPL(i2c_atr_delete, "I2C_ATR");
-int i2c_atr_add_adapter(struct i2c_atr *atr, u32 chan_id,
- struct device *adapter_parent,
- struct fwnode_handle *bus_handle)
+int i2c_atr_add_adapter(struct i2c_atr *atr, struct i2c_atr_adap_desc *desc)
{
+ struct fwnode_handle *bus_handle = desc->bus_handle;
struct i2c_adapter *parent = atr->parent;
+ char symlink_name[ATR_MAX_SYMLINK_LEN];
struct device *dev = atr->dev;
+ u32 chan_id = desc->chan_id;
struct i2c_atr_chan *chan;
- char symlink_name[ATR_MAX_SYMLINK_LEN];
- int ret;
+ int ret, idx;
if (chan_id >= atr->max_adapters) {
dev_err(dev, "No room for more i2c-atr adapters\n");
@@ -588,20 +804,23 @@ int i2c_atr_add_adapter(struct i2c_atr *atr, u32 chan_id,
if (!chan)
return -ENOMEM;
- if (!adapter_parent)
- adapter_parent = dev;
+ if (!desc->parent)
+ desc->parent = dev;
chan->atr = atr;
chan->chan_id = chan_id;
- INIT_LIST_HEAD(&chan->alias_list);
- mutex_init(&chan->orig_addrs_lock);
+ INIT_LIST_HEAD(&chan->alias_pairs);
+ lockdep_register_key(&chan->alias_pairs_lock_key);
+ lockdep_register_key(&chan->orig_addrs_lock_key);
+ mutex_init_with_key(&chan->alias_pairs_lock, &chan->alias_pairs_lock_key);
+ mutex_init_with_key(&chan->orig_addrs_lock, &chan->orig_addrs_lock_key);
snprintf(chan->adap.name, sizeof(chan->adap.name), "i2c-%d-atr-%d",
i2c_adapter_id(parent), chan_id);
chan->adap.owner = THIS_MODULE;
chan->adap.algo = &atr->algo;
chan->adap.algo_data = chan;
- chan->adap.dev.parent = adapter_parent;
+ chan->adap.dev.parent = desc->parent;
chan->adap.retries = parent->retries;
chan->adap.timeout = parent->timeout;
chan->adap.quirks = parent->quirks;
@@ -628,13 +847,26 @@ int i2c_atr_add_adapter(struct i2c_atr *atr, u32 chan_id,
fwnode_handle_put(atr_node);
}
+ if (desc->num_aliases > 0) {
+ chan->alias_pool = i2c_atr_alloc_alias_pool(desc->num_aliases, false);
+ if (IS_ERR(chan->alias_pool)) {
+ ret = PTR_ERR(chan->alias_pool);
+ goto err_fwnode_put;
+ }
+
+ for (idx = 0; idx < desc->num_aliases; idx++)
+ chan->alias_pool->aliases[idx] = desc->aliases[idx];
+ } else {
+ chan->alias_pool = atr->alias_pool;
+ }
+
atr->adapter[chan_id] = &chan->adap;
ret = i2c_add_adapter(&chan->adap);
if (ret) {
dev_err(dev, "failed to add atr-adapter %u (error=%d)\n",
chan_id, ret);
- goto err_fwnode_put;
+ goto err_free_alias_pool;
}
snprintf(symlink_name, sizeof(symlink_name), "channel-%u",
@@ -651,9 +883,15 @@ int i2c_atr_add_adapter(struct i2c_atr *atr, u32 chan_id,
return 0;
+err_free_alias_pool:
+ if (!chan->alias_pool->shared)
+ i2c_atr_free_alias_pool(chan->alias_pool);
err_fwnode_put:
fwnode_handle_put(dev_fwnode(&chan->adap.dev));
mutex_destroy(&chan->orig_addrs_lock);
+ mutex_destroy(&chan->alias_pairs_lock);
+ lockdep_unregister_key(&chan->orig_addrs_lock_key);
+ lockdep_unregister_key(&chan->alias_pairs_lock_key);
kfree(chan);
return ret;
}
@@ -683,10 +921,16 @@ void i2c_atr_del_adapter(struct i2c_atr *atr, u32 chan_id)
i2c_del_adapter(adap);
+ if (!chan->alias_pool->shared)
+ i2c_atr_free_alias_pool(chan->alias_pool);
+
atr->adapter[chan_id] = NULL;
fwnode_handle_put(fwnode);
mutex_destroy(&chan->orig_addrs_lock);
+ mutex_destroy(&chan->alias_pairs_lock);
+ lockdep_unregister_key(&chan->orig_addrs_lock_key);
+ lockdep_unregister_key(&chan->alias_pairs_lock_key);
kfree(chan->orig_addrs);
kfree(chan);
}
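
For context, a minimal sketch of how an address-translator driver would sit on top of the reworked, address-based API above. The driver name, callbacks and setup function are hypothetical; only the i2c_atr_* calls, the ops members and the descriptor fields come from this patch:

#include <linux/i2c.h>
#include <linux/i2c-atr.h>

/* Hypothetical callbacks that program one translation entry in hardware */
static int foo_attach_addr(struct i2c_atr *atr, u32 chan_id, u16 addr, u16 alias)
{
	/* map client address 'addr' to bus alias 'alias' on channel chan_id */
	return 0;
}

static void foo_detach_addr(struct i2c_atr *atr, u32 chan_id, u16 addr)
{
	/* drop the hardware translation entry for 'addr' on channel chan_id */
}

static const struct i2c_atr_ops foo_atr_ops = {
	.attach_addr = foo_attach_addr,		/* formerly .attach_client */
	.detach_addr = foo_detach_addr,		/* formerly .detach_client */
};

static int foo_setup(struct device *dev, struct i2c_adapter *parent)
{
	struct i2c_atr_adap_desc desc = {
		.chan_id     = 0,
		.parent      = dev,
		.bus_handle  = NULL,	/* optional fwnode for the child bus */
		.num_aliases = 0,	/* 0: share the ATR-wide alias pool */
	};
	struct i2c_atr *atr;

	/* last argument: 0, I2C_ATR_F_STATIC and/or I2C_ATR_F_PASSTHROUGH */
	atr = i2c_atr_new(parent, dev, &foo_atr_ops, 1, 0);
	if (IS_ERR(atr))
		return PTR_ERR(atr);

	return i2c_atr_add_adapter(atr, &desc);
}
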
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 7ad1ad5c8c3f..2ad2b1838f0f 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -26,14 +26,13 @@
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/irqflags.h>
+#include <linux/irq.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of.h>
-#include <linux/of_irq.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/pm_domain.h>
@@ -42,6 +41,7 @@
#include <linux/property.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include "i2c-core.h"
@@ -490,6 +490,7 @@ static int i2c_smbus_host_notify_to_irq(const struct i2c_client *client)
static int i2c_device_probe(struct device *dev)
{
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
struct i2c_client *client = i2c_verify_client(dev);
struct i2c_driver *driver;
bool do_power_on;
@@ -508,11 +509,11 @@ static int i2c_device_probe(struct device *dev)
/* Keep adapter active when Host Notify is required */
pm_runtime_get_sync(&client->adapter->dev);
irq = i2c_smbus_host_notify_to_irq(client);
- } else if (dev->of_node) {
- irq = of_irq_get_byname(dev->of_node, "irq");
+ } else if (is_of_node(fwnode)) {
+ irq = fwnode_irq_get_byname(fwnode, "irq");
if (irq == -EINVAL || irq == -ENODATA)
- irq = of_irq_get(dev->of_node, 0);
- } else if (ACPI_COMPANION(dev)) {
+ irq = fwnode_irq_get(fwnode, 0);
+ } else if (is_acpi_device_node(fwnode)) {
bool wake_capable;
irq = i2c_acpi_get_irq(client, &wake_capable);
@@ -520,7 +521,7 @@ static int i2c_device_probe(struct device *dev)
client->flags |= I2C_CLIENT_WAKE;
}
if (irq == -EPROBE_DEFER) {
- status = irq;
+ status = dev_err_probe(dev, irq, "can't get irq\n");
goto put_sync_adapter;
}
@@ -546,9 +547,9 @@ static int i2c_device_probe(struct device *dev)
if (client->flags & I2C_CLIENT_WAKE) {
int wakeirq;
- wakeirq = of_irq_get_byname(dev->of_node, "wakeup");
+ wakeirq = fwnode_irq_get_byname(fwnode, "wakeup");
if (wakeirq == -EPROBE_DEFER) {
- status = wakeirq;
+ status = dev_err_probe(dev, wakeirq, "can't get wakeirq\n");
goto put_sync_adapter;
}
@@ -567,7 +568,7 @@ static int i2c_device_probe(struct device *dev)
dev_dbg(dev, "probe\n");
- status = of_clk_set_defaults(dev->of_node, false);
+ status = of_clk_set_defaults(to_of_node(fwnode), false);
if (status < 0)
goto err_clear_wakeup_irq;
@@ -961,6 +962,7 @@ static void i2c_unlock_addr(struct i2c_adapter *adap, unsigned short addr,
struct i2c_client *
i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
{
+ struct fwnode_handle *fwnode = info->fwnode;
struct i2c_client *client;
bool need_put = false;
int status;
@@ -1001,18 +1003,18 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
client->dev.parent = &client->adapter->dev;
client->dev.bus = &i2c_bus_type;
client->dev.type = &i2c_client_type;
- client->dev.of_node = of_node_get(info->of_node);
- client->dev.fwnode = info->fwnode;
device_enable_async_suspend(&client->dev);
+ device_set_node(&client->dev, fwnode_handle_get(fwnode));
+
if (info->swnode) {
status = device_add_software_node(&client->dev, info->swnode);
if (status) {
dev_err(&adap->dev,
"Failed to add software node to client %s: %d\n",
client->name, status);
- goto out_err_put_of_node;
+ goto out_err_put_fwnode;
}
}
@@ -1031,8 +1033,8 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
out_remove_swnode:
device_remove_software_node(&client->dev);
need_put = true;
-out_err_put_of_node:
- of_node_put(info->of_node);
+out_err_put_fwnode:
+ fwnode_handle_put(fwnode);
out_err:
dev_err(&adap->dev,
"Failed to register i2c client %s at 0x%02x (%d)\n",
@@ -1054,16 +1056,17 @@ EXPORT_SYMBOL_GPL(i2c_new_client_device);
*/
void i2c_unregister_device(struct i2c_client *client)
{
+ struct fwnode_handle *fwnode;
+
if (IS_ERR_OR_NULL(client))
return;
- if (client->dev.of_node) {
- of_node_clear_flag(client->dev.of_node, OF_POPULATED);
- of_node_put(client->dev.of_node);
- }
-
- if (ACPI_COMPANION(&client->dev))
- acpi_device_clear_enumerated(ACPI_COMPANION(&client->dev));
+ fwnode = dev_fwnode(&client->dev);
+ if (is_of_node(fwnode))
+ of_node_clear_flag(to_of_node(fwnode), OF_POPULATED);
+ else if (is_acpi_device_node(fwnode))
+ acpi_device_clear_enumerated(to_acpi_device_node(fwnode));
+ fwnode_handle_put(fwnode);
device_remove_software_node(&client->dev);
device_unregister(&client->dev);
@@ -1209,11 +1212,9 @@ struct i2c_client *i2c_new_ancillary_device(struct i2c_client *client,
u32 addr = default_addr;
int i;
- if (np) {
- i = of_property_match_string(np, "reg-names", name);
- if (i >= 0)
- of_property_read_u32_index(np, "reg", i, &addr);
- }
+ i = of_property_match_string(np, "reg-names", name);
+ if (i >= 0)
+ of_property_read_u32_index(np, "reg", i, &addr);
dev_dbg(&client->adapter->dev, "Address for %s : 0x%x\n", name, addr);
return i2c_new_dummy_device(client->adapter, addr);
@@ -1651,12 +1652,10 @@ int i2c_add_adapter(struct i2c_adapter *adapter)
struct device *dev = &adapter->dev;
int id;
- if (dev->of_node) {
- id = of_alias_get_id(dev->of_node, "i2c");
- if (id >= 0) {
- adapter->nr = id;
- return __i2c_add_numbered_adapter(adapter);
- }
+ id = of_alias_get_id(dev->of_node, "i2c");
+ if (id >= 0) {
+ adapter->nr = id;
+ return __i2c_add_numbered_adapter(adapter);
}
mutex_lock(&core_lock);
@@ -2146,7 +2145,7 @@ static int i2c_quirk_error(struct i2c_adapter *adap, struct i2c_msg *msg, char *
{
dev_err_ratelimited(&adap->dev, "adapter quirk: %s (addr 0x%04x, size %u, %s)\n",
err_msg, msg->addr, msg->len,
- msg->flags & I2C_M_RD ? "read" : "write");
+ str_read_write(msg->flags & I2C_M_RD));
return -EOPNOTSUPP;
}
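
The dev_err_probe() conversions above follow the usual pattern for deferrable resources; roughly, in any probe path (hypothetical fragment, not from this patch):

	irq = fwnode_irq_get_byname(dev_fwnode(dev), "irq");
	if (irq < 0) {
		/* dev_err_probe() logs only real errors, records the reason
		 * for -EPROBE_DEFER, and returns the error code unchanged. */
		return dev_err_probe(dev, irq, "can't get irq\n");
	}
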
diff --git a/drivers/i2c/i2c-core-of.c b/drivers/i2c/i2c-core-of.c
index 02feee6c9ba9..eb7fb202355f 100644
--- a/drivers/i2c/i2c-core-of.c
+++ b/drivers/i2c/i2c-core-of.c
@@ -49,7 +49,6 @@ int of_i2c_get_board_info(struct device *dev, struct device_node *node,
}
info->addr = addr;
- info->of_node = node;
info->fwnode = of_fwnode_handle(node);
if (of_property_read_bool(node, "host-notify"))
diff --git a/drivers/i2c/i2c-core-slave.c b/drivers/i2c/i2c-core-slave.c
index faefe1dfa8e5..7ee6b992b835 100644
--- a/drivers/i2c/i2c-core-slave.c
+++ b/drivers/i2c/i2c-core-slave.c
@@ -11,6 +11,7 @@
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/of.h>
+#include <linux/property.h>
#include "i2c-core.h"
@@ -108,15 +109,18 @@ EXPORT_SYMBOL_GPL(i2c_slave_event);
*/
bool i2c_detect_slave_mode(struct device *dev)
{
- if (IS_BUILTIN(CONFIG_OF) && dev->of_node) {
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+
+ if (is_of_node(fwnode)) {
+ struct fwnode_handle *child __free(fwnode_handle) = NULL;
u32 reg;
- for_each_child_of_node_scoped(dev->of_node, child) {
- of_property_read_u32(child, "reg", &reg);
+ fwnode_for_each_child_node(fwnode, child) {
+ fwnode_property_read_u32(child, "reg", &reg);
if (reg & I2C_OWN_SLAVE_ADDRESS)
return true;
}
- } else if (IS_BUILTIN(CONFIG_ACPI) && ACPI_HANDLE(dev)) {
+ } else if (is_acpi_device_node(fwnode)) {
dev_dbg(dev, "ACPI slave is not supported yet\n");
}
return false;
diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c
index e73afbefe222..71eb1ef56f0c 100644
--- a/drivers/i2c/i2c-core-smbus.c
+++ b/drivers/i2c/i2c-core-smbus.c
@@ -16,6 +16,7 @@
#include <linux/i2c-smbus.h>
#include <linux/property.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include "i2c-core.h"
@@ -433,7 +434,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
case I2C_SMBUS_I2C_BLOCK_DATA:
if (data->block[0] > I2C_SMBUS_BLOCK_MAX) {
dev_err(&adapter->dev, "Invalid block %s size %d\n",
- read_write == I2C_SMBUS_READ ? "read" : "write",
+ str_read_write(read_write == I2C_SMBUS_READ),
data->block[0]);
return -EINVAL;
}
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index 7d40e7aa3799..0316b347f9e7 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -372,12 +372,13 @@ EXPORT_SYMBOL_GPL(i2c_free_slave_host_notify_device);
* - Only works on systems with 1 to 8 memory slots
*/
#if IS_ENABLED(CONFIG_DMI)
-void i2c_register_spd(struct i2c_adapter *adap)
+static void i2c_register_spd(struct i2c_adapter *adap, bool write_disabled)
{
int n, slot_count = 0, dimm_count = 0;
u16 handle;
u8 common_mem_type = 0x0, mem_type;
u64 mem_size;
+ bool instantiate = true;
const char *name;
while ((handle = dmi_memdev_handle(slot_count)) != 0xffff) {
@@ -438,6 +439,7 @@ void i2c_register_spd(struct i2c_adapter *adap)
case 0x22: /* DDR5 */
case 0x23: /* LPDDR5 */
name = "spd5118";
+ instantiate = !write_disabled;
break;
default:
dev_info(&adap->dev,
@@ -461,6 +463,9 @@ void i2c_register_spd(struct i2c_adapter *adap)
addr_list[0] = 0x50 + n;
addr_list[1] = I2C_CLIENT_END;
+ if (!instantiate)
+ continue;
+
if (!IS_ERR(i2c_new_scanned_device(adap, &info, addr_list, NULL))) {
dev_info(&adap->dev,
"Successfully instantiated SPD at 0x%hx\n",
@@ -469,7 +474,19 @@ void i2c_register_spd(struct i2c_adapter *adap)
}
}
}
-EXPORT_SYMBOL_GPL(i2c_register_spd);
+
+void i2c_register_spd_write_disable(struct i2c_adapter *adap)
+{
+ i2c_register_spd(adap, true);
+}
+EXPORT_SYMBOL_GPL(i2c_register_spd_write_disable);
+
+void i2c_register_spd_write_enable(struct i2c_adapter *adap)
+{
+ i2c_register_spd(adap, false);
+}
+EXPORT_SYMBOL_GPL(i2c_register_spd_write_enable);
+
#endif
MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
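
Bus drivers that previously called i2c_register_spd() now have to state their SPD write policy explicitly; a minimal, hypothetical call site:

	/* In an SMBus controller's probe path (hypothetical driver and policy
	 * helper): pick the variant matching whether SPD write access through
	 * this adapter is acceptable.
	 */
	if (foo_spd_writes_allowed(priv))
		i2c_register_spd_write_enable(&priv->adap);
	else
		i2c_register_spd_write_disable(&priv->adap);
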
diff --git a/drivers/i2c/muxes/i2c-mux-ltc4306.c b/drivers/i2c/muxes/i2c-mux-ltc4306.c
index 8a87f19bf5d5..c688af270a11 100644
--- a/drivers/i2c/muxes/i2c-mux-ltc4306.c
+++ b/drivers/i2c/muxes/i2c-mux-ltc4306.c
@@ -85,13 +85,13 @@ static int ltc4306_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(val & BIT(1 - offset));
}
-static void ltc4306_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int ltc4306_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct ltc4306 *data = gpiochip_get_data(chip);
- regmap_update_bits(data->regmap, LTC_REG_CONFIG, BIT(5 - offset),
- value ? BIT(5 - offset) : 0);
+ return regmap_update_bits(data->regmap, LTC_REG_CONFIG,
+ BIT(5 - offset), value ? BIT(5 - offset) : 0);
}
static int ltc4306_gpio_get_direction(struct gpio_chip *chip,
@@ -164,7 +164,7 @@ static int ltc4306_gpio_init(struct ltc4306 *data)
data->gpiochip.direction_input = ltc4306_gpio_direction_input;
data->gpiochip.direction_output = ltc4306_gpio_direction_output;
data->gpiochip.get = ltc4306_gpio_get;
- data->gpiochip.set = ltc4306_gpio_set;
+ data->gpiochip.set_rv = ltc4306_gpio_set;
data->gpiochip.set_config = ltc4306_gpio_set_config;
data->gpiochip.owner = THIS_MODULE;
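
The ltc4306 hunks above are one instance of the wider gpiochip migration from the void .set callback to the int-returning .set_rv; for any other driver the conversion looks roughly like this (hypothetical names):

/* old: write errors were silently dropped */
static void foo_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
	foo_write_bit(gpiochip_get_data(chip), offset, value);
}

/* new: return the result so gpiolib can propagate it */
static int foo_gpio_set_rv(struct gpio_chip *chip, unsigned int offset, int value)
{
	return foo_write_bit(gpiochip_get_data(chip), offset, value);
}

/* registration: chip->set_rv = foo_gpio_set_rv; instead of chip->set */
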
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 142170473e75..8670e58675c6 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -36,6 +36,7 @@ MODULE_LICENSE("Dual BSD/GPL");
#define CM_DESTROY_ID_WAIT_TIMEOUT 10000 /* msecs */
#define CM_DIRECT_RETRY_CTX ((void *) 1UL)
+#define CM_MRA_SETTING 24 /* 4.096us * 2^24 = ~68.7 seconds */
static const char * const ibcm_rej_reason_strs[] = {
[IB_CM_REJ_NO_QP] = "no QP",
@@ -167,7 +168,7 @@ struct cm_port {
struct cm_device {
struct kref kref;
struct list_head list;
- spinlock_t mad_agent_lock;
+ rwlock_t mad_agent_lock;
struct ib_device *ib_device;
u8 ack_delay;
int going_down;
@@ -241,7 +242,6 @@ struct cm_id_private {
u8 initiator_depth;
u8 retry_count;
u8 rnr_retry_count;
- u8 service_timeout;
u8 target_ack_delay;
struct list_head work_list;
@@ -285,7 +285,7 @@ static struct ib_mad_send_buf *cm_alloc_msg(struct cm_id_private *cm_id_priv)
if (!cm_id_priv->av.port)
return ERR_PTR(-EINVAL);
- spin_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
+ read_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
mad_agent = cm_id_priv->av.port->mad_agent;
if (!mad_agent) {
m = ERR_PTR(-EINVAL);
@@ -311,7 +311,7 @@ static struct ib_mad_send_buf *cm_alloc_msg(struct cm_id_private *cm_id_priv)
m->ah = ah;
out:
- spin_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
+ read_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
return m;
}
@@ -1297,10 +1297,10 @@ static __be64 cm_form_tid(struct cm_id_private *cm_id_priv)
if (!cm_id_priv->av.port)
return cpu_to_be64(low_tid);
- spin_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
+ read_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
if (cm_id_priv->av.port->mad_agent)
hi_tid = ((u64)cm_id_priv->av.port->mad_agent->hi_tid) << 32;
- spin_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
+ read_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
return cpu_to_be64(hi_tid | low_tid);
}
@@ -1872,7 +1872,7 @@ static void cm_process_work(struct cm_id_private *cm_id_priv,
static void cm_format_mra(struct cm_mra_msg *mra_msg,
struct cm_id_private *cm_id_priv,
- enum cm_msg_response msg_mraed, u8 service_timeout,
+ enum cm_msg_response msg_mraed,
const void *private_data, u8 private_data_len)
{
cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
@@ -1881,7 +1881,7 @@ static void cm_format_mra(struct cm_mra_msg *mra_msg,
be32_to_cpu(cm_id_priv->id.local_id));
IBA_SET(CM_MRA_REMOTE_COMM_ID, mra_msg,
be32_to_cpu(cm_id_priv->id.remote_id));
- IBA_SET(CM_MRA_SERVICE_TIMEOUT, mra_msg, service_timeout);
+ IBA_SET(CM_MRA_SERVICE_TIMEOUT, mra_msg, CM_MRA_SETTING);
if (private_data && private_data_len)
IBA_SET_MEM(CM_MRA_PRIVATE_DATA, mra_msg, private_data,
@@ -1960,7 +1960,7 @@ static void cm_dup_req_handler(struct cm_work *work,
switch (cm_id_priv->id.state) {
case IB_CM_MRA_REQ_SENT:
cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
- CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
+ CM_MSG_RESPONSE_REQ,
cm_id_priv->private_data,
cm_id_priv->private_data_len);
break;
@@ -2454,7 +2454,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
cm_id_priv->private_data_len);
else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
- CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
+ CM_MSG_RESPONSE_REP,
cm_id_priv->private_data,
cm_id_priv->private_data_len);
else
@@ -3094,26 +3094,13 @@ out:
return -EINVAL;
}
-int ib_send_cm_mra(struct ib_cm_id *cm_id,
- u8 service_timeout,
- const void *private_data,
- u8 private_data_len)
+int ib_prepare_cm_mra(struct ib_cm_id *cm_id)
{
struct cm_id_private *cm_id_priv;
- struct ib_mad_send_buf *msg;
enum ib_cm_state cm_state;
enum ib_cm_lap_state lap_state;
- enum cm_msg_response msg_response;
- void *data;
unsigned long flags;
- int ret;
-
- if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
- return -EINVAL;
-
- data = cm_copy_private_data(private_data, private_data_len);
- if (IS_ERR(data))
- return PTR_ERR(data);
+ int ret = 0;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
@@ -3122,58 +3109,33 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
case IB_CM_REQ_RCVD:
cm_state = IB_CM_MRA_REQ_SENT;
lap_state = cm_id->lap_state;
- msg_response = CM_MSG_RESPONSE_REQ;
break;
case IB_CM_REP_RCVD:
cm_state = IB_CM_MRA_REP_SENT;
lap_state = cm_id->lap_state;
- msg_response = CM_MSG_RESPONSE_REP;
break;
case IB_CM_ESTABLISHED:
if (cm_id->lap_state == IB_CM_LAP_RCVD) {
cm_state = cm_id->state;
lap_state = IB_CM_MRA_LAP_SENT;
- msg_response = CM_MSG_RESPONSE_OTHER;
break;
}
fallthrough;
default:
- trace_icm_send_mra_unknown_err(&cm_id_priv->id);
+ trace_icm_prepare_mra_unknown_err(&cm_id_priv->id);
ret = -EINVAL;
goto error_unlock;
}
- if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
- msg = cm_alloc_msg(cm_id_priv);
- if (IS_ERR(msg)) {
- ret = PTR_ERR(msg);
- goto error_unlock;
- }
-
- cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
- msg_response, service_timeout,
- private_data, private_data_len);
- trace_icm_send_mra(cm_id);
- ret = ib_post_send_mad(msg, NULL);
- if (ret)
- goto error_free_msg;
- }
-
cm_id->state = cm_state;
cm_id->lap_state = lap_state;
- cm_id_priv->service_timeout = service_timeout;
- cm_set_private_data(cm_id_priv, data, private_data_len);
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- return 0;
+ cm_set_private_data(cm_id_priv, NULL, 0);
-error_free_msg:
- cm_free_msg(msg);
error_unlock:
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- kfree(data);
return ret;
}
-EXPORT_SYMBOL(ib_send_cm_mra);
+EXPORT_SYMBOL(ib_prepare_cm_mra);
static struct cm_id_private *cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
@@ -3377,7 +3339,6 @@ static int cm_lap_handler(struct cm_work *work)
cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
CM_MSG_RESPONSE_OTHER,
- cm_id_priv->service_timeout,
cm_id_priv->private_data,
cm_id_priv->private_data_len);
spin_unlock_irq(&cm_id_priv->lock);
@@ -3786,7 +3747,8 @@ static void cm_process_send_error(struct cm_id_private *cm_id_priv,
spin_lock_irq(&cm_id_priv->lock);
if (msg != cm_id_priv->msg) {
spin_unlock_irq(&cm_id_priv->lock);
- cm_free_priv_msg(msg);
+ cm_free_msg(msg);
+ cm_deref_id(cm_id_priv);
return;
}
cm_free_priv_msg(msg);
@@ -4378,7 +4340,7 @@ static int cm_add_one(struct ib_device *ib_device)
return -ENOMEM;
kref_init(&cm_dev->kref);
- spin_lock_init(&cm_dev->mad_agent_lock);
+ rwlock_init(&cm_dev->mad_agent_lock);
cm_dev->ib_device = ib_device;
cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
cm_dev->going_down = 0;
@@ -4494,9 +4456,9 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
* The above ensures no call paths from the work are running,
* the remaining paths all take the mad_agent_lock.
*/
- spin_lock(&cm_dev->mad_agent_lock);
+ write_lock(&cm_dev->mad_agent_lock);
port->mad_agent = NULL;
- spin_unlock(&cm_dev->mad_agent_lock);
+ write_unlock(&cm_dev->mad_agent_lock);
ib_unregister_mad_agent(mad_agent);
ib_port_unregister_client_groups(ib_device, i,
cm_counter_groups);
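
Callers that used ib_send_cm_mra() to buy time for connection setup now only prepare the id so the core answers duplicate REQ/REP messages with the fixed CM_MRA_SETTING timeout; the conversion in a generic ULP handler looks like this (handler name hypothetical, the old call shown is the pattern the CMA used):

static int foo_cm_handler(struct ib_cm_id *cm_id, const struct ib_cm_event *event)
{
	int ret;

	if (event->event == IB_CM_REQ_RECEIVED) {
		/* was: ib_send_cm_mra(cm_id, IB_CM_MRA_FLAG_DELAY | 24, NULL, 0); */
		ret = ib_prepare_cm_mra(cm_id);
		if (ret)
			return ret;
		/* ...defer the heavy part of connection setup... */
	}

	return 0;
}
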
diff --git a/drivers/infiniband/core/cm_trace.h b/drivers/infiniband/core/cm_trace.h
index 944d9071245d..4a4987da69d4 100644
--- a/drivers/infiniband/core/cm_trace.h
+++ b/drivers/infiniband/core/cm_trace.h
@@ -229,7 +229,7 @@ DEFINE_CM_ERR_EVENT(send_drep);
DEFINE_CM_ERR_EVENT(dreq_unknown);
DEFINE_CM_ERR_EVENT(send_unknown_rej);
DEFINE_CM_ERR_EVENT(rej_unknown);
-DEFINE_CM_ERR_EVENT(send_mra_unknown);
+DEFINE_CM_ERR_EVENT(prepare_mra_unknown);
DEFINE_CM_ERR_EVENT(mra_unknown);
DEFINE_CM_ERR_EVENT(qp_init);
DEFINE_CM_ERR_EVENT(qp_rtr);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index ab31eefa916b..9b471548e7ae 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -46,7 +46,6 @@ MODULE_LICENSE("Dual BSD/GPL");
#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15
-#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 16
#define CMA_PREFERRED_ROCE_GID_TYPE IB_GID_TYPE_ROCE_UDP_ENCAP
@@ -146,19 +145,6 @@ struct iw_cm_id *rdma_iw_cm_id(struct rdma_cm_id *id)
}
EXPORT_SYMBOL(rdma_iw_cm_id);
-/**
- * rdma_res_to_id() - return the rdma_cm_id pointer for this restrack.
- * @res: rdma resource tracking entry pointer
- */
-struct rdma_cm_id *rdma_res_to_id(struct rdma_restrack_entry *res)
-{
- struct rdma_id_private *id_priv =
- container_of(res, struct rdma_id_private, res);
-
- return &id_priv->id;
-}
-EXPORT_SYMBOL(rdma_res_to_id);
-
static int cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device, void *client_data);
@@ -2214,8 +2200,8 @@ static int cma_ib_handler(struct ib_cm_id *cm_id,
case IB_CM_REP_RECEIVED:
if (state == RDMA_CM_CONNECT &&
(id_priv->id.qp_type != IB_QPT_UD)) {
- trace_cm_send_mra(id_priv);
- ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
+ trace_cm_prepare_mra(id_priv);
+ ib_prepare_cm_mra(cm_id);
}
if (id_priv->id.qp) {
event.status = cma_rep_recv(id_priv);
@@ -2476,8 +2462,8 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id,
if (READ_ONCE(conn_id->state) == RDMA_CM_CONNECT &&
conn_id->id.qp_type != IB_QPT_UD) {
- trace_cm_send_mra(cm_id->context);
- ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
+ trace_cm_prepare_mra(cm_id->context);
+ ib_prepare_cm_mra(cm_id);
}
mutex_unlock(&conn_id->handler_mutex);
@@ -5245,7 +5231,8 @@ static int cma_netevent_callback(struct notifier_block *self,
neigh->ha, ETH_ALEN))
continue;
cma_id_get(current_id);
- queue_work(cma_wq, &current_id->id.net_work);
+ if (!queue_work(cma_wq, &current_id->id.net_work))
+ cma_id_put(current_id);
}
out:
spin_unlock_irqrestore(&id_table_lock, flags);
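
The cma_netevent_callback() change above applies the standard rule for references handed to a work item: queue_work() returns false when the work is already pending, and in that case the reference just taken must be dropped because the earlier submission already owns one. The generic shape of the pattern (hypothetical names):

	foo_get(obj);				/* reference for the work item */
	if (!queue_work(foo_wq, &obj->work))
		foo_put(obj);			/* already queued: drop ours */
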
diff --git a/drivers/infiniband/core/cma_trace.h b/drivers/infiniband/core/cma_trace.h
index dc622f3778be..3456d5f3aa47 100644
--- a/drivers/infiniband/core/cma_trace.h
+++ b/drivers/infiniband/core/cma_trace.h
@@ -55,7 +55,7 @@ DECLARE_EVENT_CLASS(cma_fsm_class,
DEFINE_CMA_FSM_EVENT(send_rtu);
DEFINE_CMA_FSM_EVENT(send_rej);
-DEFINE_CMA_FSM_EVENT(send_mra);
+DEFINE_CMA_FSM_EVENT(prepare_mra);
DEFINE_CMA_FSM_EVENT(send_sidr_req);
DEFINE_CMA_FSM_EVENT(send_sidr_rep);
DEFINE_CMA_FSM_EVENT(disconnect);
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index f4486cbd8f45..62410578dec3 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -368,12 +368,9 @@ EXPORT_SYMBOL(iw_cm_disconnect);
/*
* CM_ID <-- DESTROYING
*
- * Clean up all resources associated with the connection and release
- * the initial reference taken by iw_create_cm_id.
- *
- * Returns true if and only if the last cm_id_priv reference has been dropped.
+ * Clean up all resources associated with the connection.
*/
-static bool destroy_cm_id(struct iw_cm_id *cm_id)
+static void destroy_cm_id(struct iw_cm_id *cm_id)
{
struct iwcm_id_private *cm_id_priv;
struct ib_qp *qp;
@@ -442,20 +439,22 @@ static bool destroy_cm_id(struct iw_cm_id *cm_id)
iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr);
iwpm_remove_mapping(&cm_id->local_addr, RDMA_NL_IWCM);
}
-
- return iwcm_deref_id(cm_id_priv);
}
/*
- * This function is only called by the application thread and cannot
- * be called by the event thread. The function will wait for all
- * references to be released on the cm_id and then kfree the cm_id
- * object.
+ * Destroy cm_id. If the cm_id still has other references, wait for all
+ * references to be released on the cm_id and then release the initial
+ * reference taken by iw_create_cm_id.
*/
void iw_destroy_cm_id(struct iw_cm_id *cm_id)
{
- if (!destroy_cm_id(cm_id))
+ struct iwcm_id_private *cm_id_priv;
+
+ cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+ destroy_cm_id(cm_id);
+ if (refcount_read(&cm_id_priv->refcount) > 1)
flush_workqueue(iwcm_wq);
+ iwcm_deref_id(cm_id_priv);
}
EXPORT_SYMBOL(iw_destroy_cm_id);
@@ -1035,8 +1034,10 @@ static void cm_work_handler(struct work_struct *_work)
if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) {
ret = process_event(cm_id_priv, &levent);
- if (ret)
- WARN_ON_ONCE(destroy_cm_id(&cm_id_priv->id));
+ if (ret) {
+ destroy_cm_id(&cm_id_priv->id);
+ WARN_ON_ONCE(iwcm_deref_id(cm_id_priv));
+ }
} else
pr_debug("dropping event %d\n", levent.event);
if (iwcm_deref_id(cm_id_priv))
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 8af0619a39cd..b4b10e8a6495 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -158,7 +158,7 @@ static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
recv_wc->recv_buf.grh, agent->port_num);
if (IS_ERR(ah))
- return (void *) ah;
+ return ERR_CAST(ah);
hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
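
This hunk, like the uverbs_cmd.c and verbs.c ones further down, replaces an open-coded cast of an error pointer with ERR_CAST(), which keeps the encoded errno while making the intent explicit; the general shape (hypothetical types):

	struct foo *f = foo_create();

	if (IS_ERR(f))
		return ERR_CAST(f);	/* same errno, different pointer type */
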
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index c48ef6083020..c752ae9fad6c 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -41,67 +41,72 @@
#include <linux/hugetlb.h>
#include <linux/interval_tree.h>
#include <linux/hmm.h>
+#include <linux/hmm-dma.h>
#include <linux/pagemap.h>
#include <rdma/ib_umem_odp.h>
#include "uverbs.h"
-static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
- const struct mmu_interval_notifier_ops *ops)
+static void ib_init_umem_implicit_odp(struct ib_umem_odp *umem_odp)
{
- int ret;
+ umem_odp->is_implicit_odp = 1;
+ umem_odp->umem.is_odp = 1;
+ mutex_init(&umem_odp->umem_mutex);
+}
+
+static int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
+ const struct mmu_interval_notifier_ops *ops)
+{
+ struct ib_device *dev = umem_odp->umem.ibdev;
+ size_t page_size = 1UL << umem_odp->page_shift;
+ struct hmm_dma_map *map;
+ unsigned long start;
+ unsigned long end;
+ size_t nr_entries;
+ int ret = 0;
umem_odp->umem.is_odp = 1;
mutex_init(&umem_odp->umem_mutex);
- if (!umem_odp->is_implicit_odp) {
- size_t page_size = 1UL << umem_odp->page_shift;
- unsigned long start;
- unsigned long end;
- size_t ndmas, npfns;
-
- start = ALIGN_DOWN(umem_odp->umem.address, page_size);
- if (check_add_overflow(umem_odp->umem.address,
- (unsigned long)umem_odp->umem.length,
- &end))
- return -EOVERFLOW;
- end = ALIGN(end, page_size);
- if (unlikely(end < page_size))
- return -EOVERFLOW;
-
- ndmas = (end - start) >> umem_odp->page_shift;
- if (!ndmas)
- return -EINVAL;
-
- npfns = (end - start) >> PAGE_SHIFT;
- umem_odp->pfn_list = kvcalloc(
- npfns, sizeof(*umem_odp->pfn_list),
- GFP_KERNEL | __GFP_NOWARN);
- if (!umem_odp->pfn_list)
- return -ENOMEM;
-
- umem_odp->dma_list = kvcalloc(
- ndmas, sizeof(*umem_odp->dma_list),
- GFP_KERNEL | __GFP_NOWARN);
- if (!umem_odp->dma_list) {
+ start = ALIGN_DOWN(umem_odp->umem.address, page_size);
+ if (check_add_overflow(umem_odp->umem.address,
+ (unsigned long)umem_odp->umem.length, &end))
+ return -EOVERFLOW;
+ end = ALIGN(end, page_size);
+ if (unlikely(end < page_size))
+ return -EOVERFLOW;
+
+ nr_entries = (end - start) >> PAGE_SHIFT;
+ if (!(nr_entries * PAGE_SIZE / page_size))
+ return -EINVAL;
+
+ map = &umem_odp->map;
+ if (ib_uses_virt_dma(dev)) {
+ map->pfn_list = kvcalloc(nr_entries, sizeof(*map->pfn_list),
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!map->pfn_list)
ret = -ENOMEM;
- goto out_pfn_list;
- }
+ } else
+ ret = hmm_dma_map_alloc(dev->dma_device, map,
+ (end - start) >> PAGE_SHIFT,
+ 1 << umem_odp->page_shift);
+ if (ret)
+ return ret;
- ret = mmu_interval_notifier_insert(&umem_odp->notifier,
- umem_odp->umem.owning_mm,
- start, end - start, ops);
- if (ret)
- goto out_dma_list;
- }
+ ret = mmu_interval_notifier_insert(&umem_odp->notifier,
+ umem_odp->umem.owning_mm, start,
+ end - start, ops);
+ if (ret)
+ goto out_free_map;
return 0;
-out_dma_list:
- kvfree(umem_odp->dma_list);
-out_pfn_list:
- kvfree(umem_odp->pfn_list);
+out_free_map:
+ if (ib_uses_virt_dma(dev))
+ kfree(map->pfn_list);
+ else
+ hmm_dma_map_free(dev->dma_device, map);
return ret;
}
@@ -120,7 +125,6 @@ struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
{
struct ib_umem *umem;
struct ib_umem_odp *umem_odp;
- int ret;
if (access & IB_ACCESS_HUGETLB)
return ERR_PTR(-EINVAL);
@@ -132,16 +136,10 @@ struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
umem->ibdev = device;
umem->writable = ib_access_writable(access);
umem->owning_mm = current->mm;
- umem_odp->is_implicit_odp = 1;
umem_odp->page_shift = PAGE_SHIFT;
umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
- ret = ib_init_umem_odp(umem_odp, NULL);
- if (ret) {
- put_pid(umem_odp->tgid);
- kfree(umem_odp);
- return ERR_PTR(ret);
- }
+ ib_init_umem_implicit_odp(umem_odp);
return umem_odp;
}
EXPORT_SYMBOL(ib_umem_odp_alloc_implicit);
@@ -262,74 +260,41 @@ err_put_pid:
}
EXPORT_SYMBOL(ib_umem_odp_get);
-void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
+static void ib_umem_odp_free(struct ib_umem_odp *umem_odp)
{
+ struct ib_device *dev = umem_odp->umem.ibdev;
+
/*
* Ensure that no more pages are mapped in the umem.
*
* It is the driver's responsibility to ensure, before calling us,
* that the hardware will not attempt to access the MR any more.
*/
- if (!umem_odp->is_implicit_odp) {
- mutex_lock(&umem_odp->umem_mutex);
- ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
- ib_umem_end(umem_odp));
- mutex_unlock(&umem_odp->umem_mutex);
- mmu_interval_notifier_remove(&umem_odp->notifier);
- kvfree(umem_odp->dma_list);
- kvfree(umem_odp->pfn_list);
- }
- put_pid(umem_odp->tgid);
- kfree(umem_odp);
+ mutex_lock(&umem_odp->umem_mutex);
+ ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
+ ib_umem_end(umem_odp));
+ mutex_unlock(&umem_odp->umem_mutex);
+ mmu_interval_notifier_remove(&umem_odp->notifier);
+ if (ib_uses_virt_dma(dev))
+ kfree(umem_odp->map.pfn_list);
+ else
+ hmm_dma_map_free(dev->dma_device, &umem_odp->map);
}
-EXPORT_SYMBOL(ib_umem_odp_release);
-/*
- * Map for DMA and insert a single page into the on-demand paging page tables.
- *
- * @umem: the umem to insert the page to.
- * @dma_index: index in the umem to add the dma to.
- * @page: the page struct to map and add.
- * @access_mask: access permissions needed for this page.
- *
- * The function returns -EFAULT if the DMA mapping operation fails.
- *
- */
-static int ib_umem_odp_map_dma_single_page(
- struct ib_umem_odp *umem_odp,
- unsigned int dma_index,
- struct page *page,
- u64 access_mask)
+void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
{
- struct ib_device *dev = umem_odp->umem.ibdev;
- dma_addr_t *dma_addr = &umem_odp->dma_list[dma_index];
-
- if (*dma_addr) {
- /*
- * If the page is already dma mapped it means it went through
- * a non-invalidating trasition, like read-only to writable.
- * Resync the flags.
- */
- *dma_addr = (*dma_addr & ODP_DMA_ADDR_MASK) | access_mask;
- return 0;
- }
+ if (!umem_odp->is_implicit_odp)
+ ib_umem_odp_free(umem_odp);
- *dma_addr = ib_dma_map_page(dev, page, 0, 1 << umem_odp->page_shift,
- DMA_BIDIRECTIONAL);
- if (ib_dma_mapping_error(dev, *dma_addr)) {
- *dma_addr = 0;
- return -EFAULT;
- }
- umem_odp->npages++;
- *dma_addr |= access_mask;
- return 0;
+ put_pid(umem_odp->tgid);
+ kfree(umem_odp);
}
+EXPORT_SYMBOL(ib_umem_odp_release);
/**
* ib_umem_odp_map_dma_and_lock - DMA map userspace memory in an ODP MR and lock it.
*
* Maps the range passed in the argument to DMA addresses.
- * The DMA addresses of the mapped pages is updated in umem_odp->dma_list.
* Upon success the ODP MR will be locked to let caller complete its device
* page table update.
*
@@ -357,9 +322,6 @@ int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt,
struct hmm_range range = {};
unsigned long timeout;
- if (access_mask == 0)
- return -EINVAL;
-
if (user_virt < ib_umem_start(umem_odp) ||
user_virt + bcnt > ib_umem_end(umem_odp))
return -EFAULT;
@@ -385,11 +347,11 @@ int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt,
if (fault) {
range.default_flags = HMM_PFN_REQ_FAULT;
- if (access_mask & ODP_WRITE_ALLOWED_BIT)
+ if (access_mask & HMM_PFN_WRITE)
range.default_flags |= HMM_PFN_REQ_WRITE;
}
- range.hmm_pfns = &(umem_odp->pfn_list[pfn_start_idx]);
+ range.hmm_pfns = &(umem_odp->map.pfn_list[pfn_start_idx]);
timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
retry:
@@ -417,22 +379,17 @@ retry:
for (pfn_index = 0; pfn_index < num_pfns;
pfn_index += 1 << (page_shift - PAGE_SHIFT), dma_index++) {
- if (fault) {
- /*
- * Since we asked for hmm_range_fault() to populate
- * pages it shouldn't return an error entry on success.
- */
- WARN_ON(range.hmm_pfns[pfn_index] & HMM_PFN_ERROR);
- WARN_ON(!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID));
- } else {
- if (!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID)) {
- WARN_ON(umem_odp->dma_list[dma_index]);
- continue;
- }
- access_mask = ODP_READ_ALLOWED_BIT;
- if (range.hmm_pfns[pfn_index] & HMM_PFN_WRITE)
- access_mask |= ODP_WRITE_ALLOWED_BIT;
- }
+ /*
+ * Since we asked for hmm_range_fault() to populate
+ * pages it shouldn't return an error entry on success.
+ */
+ WARN_ON(fault && range.hmm_pfns[pfn_index] & HMM_PFN_ERROR);
+ WARN_ON(fault && !(range.hmm_pfns[pfn_index] & HMM_PFN_VALID));
+ if (!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID))
+ continue;
+
+ if (range.hmm_pfns[pfn_index] & HMM_PFN_DMA_MAPPED)
+ continue;
hmm_order = hmm_pfn_to_map_order(range.hmm_pfns[pfn_index]);
/* If a hugepage was detected and ODP wasn't set for, the umem
@@ -445,15 +402,6 @@ retry:
__func__, hmm_order, page_shift);
break;
}
-
- ret = ib_umem_odp_map_dma_single_page(
- umem_odp, dma_index, hmm_pfn_to_page(range.hmm_pfns[pfn_index]),
- access_mask);
- if (ret < 0) {
- ibdev_dbg(umem_odp->umem.ibdev,
- "ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
- break;
- }
}
/* upon success lock should stay on hold for the callee */
if (!ret)
@@ -473,45 +421,38 @@ EXPORT_SYMBOL(ib_umem_odp_map_dma_and_lock);
void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
u64 bound)
{
- dma_addr_t dma_addr;
- dma_addr_t dma;
- int idx;
- u64 addr;
struct ib_device *dev = umem_odp->umem.ibdev;
+ u64 addr;
lockdep_assert_held(&umem_odp->umem_mutex);
virt = max_t(u64, virt, ib_umem_start(umem_odp));
bound = min_t(u64, bound, ib_umem_end(umem_odp));
for (addr = virt; addr < bound; addr += BIT(umem_odp->page_shift)) {
- idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
- dma = umem_odp->dma_list[idx];
-
- /* The access flags guaranteed a valid DMA address in case was NULL */
- if (dma) {
- unsigned long pfn_idx = (addr - ib_umem_start(umem_odp)) >> PAGE_SHIFT;
- struct page *page = hmm_pfn_to_page(umem_odp->pfn_list[pfn_idx]);
-
- dma_addr = dma & ODP_DMA_ADDR_MASK;
- ib_dma_unmap_page(dev, dma_addr,
- BIT(umem_odp->page_shift),
- DMA_BIDIRECTIONAL);
- if (dma & ODP_WRITE_ALLOWED_BIT) {
- struct page *head_page = compound_head(page);
- /*
- * set_page_dirty prefers being called with
- * the page lock. However, MMU notifiers are
- * called sometimes with and sometimes without
- * the lock. We rely on the umem_mutex instead
- * to prevent other mmu notifiers from
- * continuing and allowing the page mapping to
- * be removed.
- */
- set_page_dirty(head_page);
- }
- umem_odp->dma_list[idx] = 0;
- umem_odp->npages--;
+ u64 offset = addr - ib_umem_start(umem_odp);
+ size_t idx = offset >> umem_odp->page_shift;
+ unsigned long pfn = umem_odp->map.pfn_list[idx];
+
+ if (!hmm_dma_unmap_pfn(dev->dma_device, &umem_odp->map, idx))
+ goto clear;
+
+ if (pfn & HMM_PFN_WRITE) {
+ struct page *page = hmm_pfn_to_page(pfn);
+ struct page *head_page = compound_head(page);
+ /*
+ * set_page_dirty prefers being called with
+ * the page lock. However, MMU notifiers are
+ * called sometimes with and sometimes without
+ * the lock. We rely on the umem_mutex instead
+ * to prevent other mmu notifiers from
+ * continuing and allowing the page mapping to
+ * be removed.
+ */
+ set_page_dirty(head_page);
}
+ umem_odp->npages--;
+clear:
+ umem_odp->map.pfn_list[idx] &= ~HMM_PFN_FLAGS;
}
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 3c3bb670c805..bc9fe3ceca4d 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -193,7 +193,7 @@ _ib_uverbs_lookup_comp_file(s32 fd, struct uverbs_attr_bundle *attrs)
fd, attrs);
if (IS_ERR(uobj))
- return (void *)uobj;
+ return ERR_CAST(uobj);
uverbs_uobject_get(uobj);
uobj_put_read(uobj);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index c5e78bbefbd0..75fde0fe9989 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -572,7 +572,7 @@ struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
GFP_KERNEL : GFP_ATOMIC);
if (IS_ERR(slave)) {
rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
- return (void *)slave;
+ return ERR_CAST(slave);
}
ah = _rdma_create_ah(pd, ah_attr, flags, NULL, slave);
rdma_lag_put_ah_roce_slave(slave);
diff --git a/drivers/infiniband/hw/bnxt_re/debugfs.c b/drivers/infiniband/hw/bnxt_re/debugfs.c
index af91d16c3c77..e632f1661b92 100644
--- a/drivers/infiniband/hw/bnxt_re/debugfs.c
+++ b/drivers/infiniband/hw/bnxt_re/debugfs.c
@@ -170,6 +170,9 @@ static int map_cc_config_offset_gen0_ext0(u32 offset, struct bnxt_qplib_cc_param
case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TCP_CP:
*val = ccparam->tcp_cp;
break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INACTIVITY_CP:
+ *val = ccparam->inact_th;
+ break;
default:
return -EINVAL;
}
@@ -203,7 +206,7 @@ static ssize_t bnxt_re_cc_config_get(struct file *filp, char __user *buffer,
return simple_read_from_buffer(buffer, usr_buf_len, ppos, (u8 *)(buf), rc);
}
-static void bnxt_re_fill_gen0_ext0(struct bnxt_qplib_cc_param *ccparam, u32 offset, u32 val)
+static int bnxt_re_fill_gen0_ext0(struct bnxt_qplib_cc_param *ccparam, u32 offset, u32 val)
{
u32 modify_mask;
@@ -247,7 +250,9 @@ static void bnxt_re_fill_gen0_ext0(struct bnxt_qplib_cc_param *ccparam, u32 offs
ccparam->tcp_cp = val;
break;
case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TX_QUEUE:
+ return -EOPNOTSUPP;
case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INACTIVITY_CP:
+ ccparam->inact_th = val;
break;
case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TIME_PER_PHASE:
ccparam->time_pph = val;
@@ -258,17 +263,20 @@ static void bnxt_re_fill_gen0_ext0(struct bnxt_qplib_cc_param *ccparam, u32 offs
}
ccparam->mask = modify_mask;
+ return 0;
}
static int bnxt_re_configure_cc(struct bnxt_re_dev *rdev, u32 gen_ext, u32 offset, u32 val)
{
struct bnxt_qplib_cc_param ccparam = { };
+ int rc;
- /* Supporting only Gen 0 now */
- if (gen_ext == CC_CONFIG_GEN0_EXT0)
- bnxt_re_fill_gen0_ext0(&ccparam, offset, val);
- else
- return -EINVAL;
+ if (gen_ext != CC_CONFIG_GEN0_EXT0)
+ return -EOPNOTSUPP;
+
+ rc = bnxt_re_fill_gen0_ext0(&ccparam, offset, val);
+ if (rc)
+ return rc;
bnxt_qplib_modify_cc(&rdev->qplib_res, &ccparam);
return 0;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 457eecb99f96..be34c605d516 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -1113,7 +1113,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
- if (_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf)
+ if (bnxt_ext_stats_supported(res->cctx, res->dattr->dev_cap_flags, res->is_vf))
qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;
req.qp_flags = cpu_to_le32(qp_flags);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index f231e886ad9d..9efd32a3dc55 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -846,7 +846,12 @@ int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid,
req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
req.resp_addr = cpu_to_le64(sbuf.dma_addr);
- req.function_id = cpu_to_le32(fid);
+ if (bnxt_qplib_is_chip_gen_p7(rcfw->res->cctx) && rcfw->res->is_vf)
+ req.function_id =
+ cpu_to_le32(CMDQ_QUERY_ROCE_STATS_EXT_VF_VALID |
+ (fid << CMDQ_QUERY_ROCE_STATS_EXT_VF_NUM_SFT));
+ else
+ req.function_id = cpu_to_le32(fid);
req.flags = cpu_to_le16(CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_FUNCTION_ID);
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
diff --git a/drivers/infiniband/hw/hfi1/mad.h b/drivers/infiniband/hw/hfi1/mad.h
index b6e3141253c4..d6dde762921a 100644
--- a/drivers/infiniband/hw/hfi1/mad.h
+++ b/drivers/infiniband/hw/hfi1/mad.h
@@ -124,7 +124,6 @@ struct opa_mad_notice_attr {
} __packed ntc_2048;
};
- u8 class_data[];
};
#define IB_VLARB_LOWPRI_0_31 1
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 5a91cbda4aee..764286da2ce8 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -1361,16 +1361,6 @@ void sc_flush(struct send_context *sc)
sc_wait_for_packet_egress(sc, 1);
}
-/* drop all packets on the context, no waiting until they are sent */
-void sc_drop(struct send_context *sc)
-{
- if (!sc)
- return;
-
- dd_dev_info(sc->dd, "%s: context %u(%u) - not implemented\n",
- __func__, sc->sw_index, sc->hw_context);
-}
-
/*
* Start the software reaction to a context halt or SPC freeze:
* - mark the context as halted or frozen
diff --git a/drivers/infiniband/hw/hfi1/pio.h b/drivers/infiniband/hw/hfi1/pio.h
index d07cc6ea7c63..ab0f9a3a8d12 100644
--- a/drivers/infiniband/hw/hfi1/pio.h
+++ b/drivers/infiniband/hw/hfi1/pio.h
@@ -246,7 +246,6 @@ void sc_disable(struct send_context *sc);
int sc_restart(struct send_context *sc);
void sc_return_credits(struct send_context *sc);
void sc_flush(struct send_context *sc);
-void sc_drop(struct send_context *sc);
void sc_stop(struct send_context *sc, int bit);
struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
pio_release_cb cb, void *arg);
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 0d2b39b7c8b5..16a749d16ee9 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -1521,24 +1521,6 @@ void sdma_all_running(struct hfi1_devdata *dd)
}
/**
- * sdma_all_idle() - called when the link goes down
- * @dd: hfi1_devdata
- *
- * This routine moves all engines to the idle state.
- */
-void sdma_all_idle(struct hfi1_devdata *dd)
-{
- struct sdma_engine *sde;
- unsigned int i;
-
- /* idle all engines */
- for (i = 0; i < dd->num_sdma; ++i) {
- sde = &dd->per_sdma[i];
- sdma_process_event(sde, sdma_event_e70_go_idle);
- }
-}
-
-/**
* sdma_start() - called to kick off state processing for all engines
* @dd: hfi1_devdata
*
diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
index d77246b48434..91dfd5d0c419 100644
--- a/drivers/infiniband/hw/hfi1/sdma.h
+++ b/drivers/infiniband/hw/hfi1/sdma.h
@@ -373,7 +373,6 @@ void sdma_start(struct hfi1_devdata *dd);
void sdma_exit(struct hfi1_devdata *dd);
void sdma_clean(struct hfi1_devdata *dd, size_t num_engines);
void sdma_all_running(struct hfi1_devdata *dd);
-void sdma_all_idle(struct hfi1_devdata *dd);
void sdma_freeze_notify(struct hfi1_devdata *dd, int go_idle);
void sdma_freeze(struct hfi1_devdata *dd);
void sdma_unfreeze(struct hfi1_devdata *dd);
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index cf2d29098406..62b4f16dab27 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -53,7 +53,7 @@ int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd,
int ret = 0;
fd->entry_to_rb = kcalloc(uctxt->expected_count,
- sizeof(struct rb_node *),
+ sizeof(*fd->entry_to_rb),
GFP_KERNEL);
if (!fd->entry_to_rb)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
index 7917af8e6380..baf592e6f21b 100644
--- a/drivers/infiniband/hw/hns/Makefile
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -4,6 +4,7 @@
#
ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
+ccflags-y += -I $(src)
hns-roce-hw-v2-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index 4fc5b9d5fea8..307c35888b30 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -33,7 +33,6 @@
#include <linux/pci.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
-#include "hnae3.h"
#include "hns_roce_device.h"
#include "hns_roce_hw_v2.h"
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 560a1d9de408..1dcc9cbb4678 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -1027,6 +1027,26 @@ struct hns_roce_dev {
atomic64_t *dfx_cnt;
};
+enum hns_roce_trace_type {
+ TRACE_SQ,
+ TRACE_RQ,
+ TRACE_SRQ,
+};
+
+static inline const char *trace_type_to_str(enum hns_roce_trace_type type)
+{
+ switch (type) {
+ case TRACE_SQ:
+ return "SQ";
+ case TRACE_RQ:
+ return "RQ";
+ case TRACE_SRQ:
+ return "SRQ";
+ default:
+ return "UNKNOWN";
+ }
+}
+
static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
{
return container_of(ib_dev, struct hns_roce_dev, ib_dev);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 160e8927d364..fa8747656f25 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -43,13 +43,15 @@
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
-#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h"
+#define CREATE_TRACE_POINTS
+#include "hns_roce_trace.h"
+
enum {
CMD_RST_PRC_OTHERS,
CMD_RST_PRC_SUCCESS,
@@ -738,6 +740,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
else
ret = set_ud_wqe(qp, wr, wqe, &sge_idx, owner_bit);
+ trace_hns_sq_wqe(qp->qpn, wqe_idx, wqe, 1 << qp->sq.wqe_shift,
+ wr->wr_id, TRACE_SQ);
if (unlikely(ret)) {
*bad_wr = wr;
goto out;
@@ -807,6 +811,9 @@ static void fill_rq_wqe(struct hns_roce_qp *hr_qp, const struct ib_recv_wr *wr,
wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
fill_recv_sge_to_wqe(wr, wqe, max_sge, hr_qp->rq.rsv_sge);
+
+ trace_hns_rq_wqe(hr_qp->qpn, wqe_idx, wqe, 1 << hr_qp->rq.wqe_shift,
+ wr->wr_id, TRACE_RQ);
}
static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
@@ -943,7 +950,7 @@ static void fill_wqe_idx(struct hns_roce_srq *srq, unsigned int wqe_idx)
static void update_srq_db(struct hns_roce_srq *srq)
{
struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
- struct hns_roce_v2_db db;
+ struct hns_roce_v2_db db = {};
hr_reg_write(&db, DB_TAG, srq->srqn);
hr_reg_write(&db, DB_CMD, HNS_ROCE_V2_SRQ_DB);
@@ -984,6 +991,9 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
fill_recv_sge_to_wqe(wr, wqe, max_sge, srq->rsv_sge);
fill_wqe_idx(srq, wqe_idx);
srq->wrid[wqe_idx] = wr->wr_id;
+
+ trace_hns_srq_wqe(srq->srqn, wqe_idx, wqe, 1 << srq->wqe_shift,
+ wr->wr_id, TRACE_SRQ);
}
if (likely(nreq)) {
@@ -1311,6 +1321,8 @@ static int __hns_roce_cmq_send_one(struct hns_roce_dev *hr_dev,
tail = csq->head;
for (i = 0; i < num; i++) {
+ trace_hns_cmdq_req(hr_dev, &desc[i]);
+
csq->desc[csq->head++] = desc[i];
if (csq->head == csq->desc_num)
csq->head = 0;
@@ -1325,6 +1337,8 @@ static int __hns_roce_cmq_send_one(struct hns_roce_dev *hr_dev,
if (hns_roce_cmq_csq_done(hr_dev)) {
ret = 0;
for (i = 0; i < num; i++) {
+ trace_hns_cmdq_resp(hr_dev, &csq->desc[tail]);
+
/* check the result of hardware write back */
desc_ret = le16_to_cpu(csq->desc[tail++].retval);
if (tail == csq->desc_num)
@@ -4302,8 +4316,7 @@ static inline int get_pdn(struct ib_pd *ib_pd)
}
static void modify_qp_reset_to_init(struct ib_qp *ibqp,
- struct hns_roce_v2_qp_context *context,
- struct hns_roce_v2_qp_context *qpc_mask)
+ struct hns_roce_v2_qp_context *context)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
@@ -5122,7 +5135,7 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
memset(qpc_mask, 0, hr_dev->caps.qpc_sz);
- modify_qp_reset_to_init(ibqp, context, qpc_mask);
+ modify_qp_reset_to_init(ibqp, context);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
modify_qp_init_to_init(ibqp, context, qpc_mask);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
@@ -5313,6 +5326,7 @@ static void v2_set_flushed_fields(struct ib_qp *ibqp,
return;
spin_lock_irqsave(&hr_qp->sq.lock, sq_flag);
+ trace_hns_sq_flush_cqe(hr_qp->qpn, hr_qp->sq.head, TRACE_SQ);
hr_reg_write(context, QPC_SQ_PRODUCER_IDX, hr_qp->sq.head);
hr_reg_clear(qpc_mask, QPC_SQ_PRODUCER_IDX);
hr_qp->state = IB_QPS_ERR;
@@ -5322,6 +5336,7 @@ static void v2_set_flushed_fields(struct ib_qp *ibqp,
return;
spin_lock_irqsave(&hr_qp->rq.lock, rq_flag);
+ trace_hns_rq_flush_cqe(hr_qp->qpn, hr_qp->rq.head, TRACE_RQ);
hr_reg_write(context, QPC_RQ_PRODUCER_IDX, hr_qp->rq.head);
hr_reg_clear(qpc_mask, QPC_RQ_PRODUCER_IDX);
spin_unlock_irqrestore(&hr_qp->rq.lock, rq_flag);
@@ -6248,6 +6263,7 @@ static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
eq->sub_type = sub_type;
++eq->cons_index;
aeqe_found = IRQ_HANDLED;
+ trace_hns_ae_info(event_type, aeqe, eq->eqe_size);
atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_AEQE_CNT]);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 91a5665465ff..bc7466830eaf 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -34,6 +34,7 @@
#define _HNS_ROCE_HW_V2_H
#include <linux/bitops.h>
+#include "hnae3.h"
#define HNS_ROCE_V2_MAX_RC_INL_INN_SZ 32
#define HNS_ROCE_V2_MTT_ENTRY_SZ 64
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 8d0b63d4b50a..e7a497cc125c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -37,7 +37,6 @@
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
-#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 09da3496843b..93a48b41955b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -38,6 +38,7 @@
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
+#include "hns_roce_trace.h"
static u32 hw_index_to_key(int ind)
{
@@ -159,6 +160,7 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
+ trace_hns_mr(mr);
if (mr->type != MR_TYPE_FRMR)
ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr);
else
@@ -1146,6 +1148,7 @@ int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
struct ib_device *ibdev = &hr_dev->ib_dev;
int ret;
+ trace_hns_buf_attr(buf_attr);
/* The caller has its own buffer list and invokes the hns_roce_mtr_map()
* to finish the MTT configuration.
*/
diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c
index 356d98816949..f637b73b946e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_restrack.c
+++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c
@@ -4,7 +4,6 @@
#include <rdma/rdma_cm.h>
#include <rdma/restrack.h>
#include <uapi/rdma/rdma_netlink.h>
-#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hw_v2.h"
diff --git a/drivers/infiniband/hw/hns/hns_roce_trace.h b/drivers/infiniband/hw/hns/hns_roce_trace.h
new file mode 100644
index 000000000000..59ceb591b3a1
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_trace.h
@@ -0,0 +1,216 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2025 Hisilicon Limited.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hns_roce
+
+#if !defined(__HNS_ROCE_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HNS_ROCE_TRACE_H
+
+#include <linux/tracepoint.h>
+#include <linux/string_choices.h>
+#include "hns_roce_device.h"
+#include "hns_roce_hw_v2.h"
+
+DECLARE_EVENT_CLASS(flush_head_template,
+ TP_PROTO(unsigned long qpn, u32 pi,
+ enum hns_roce_trace_type type),
+ TP_ARGS(qpn, pi, type),
+
+ TP_STRUCT__entry(__field(unsigned long, qpn)
+ __field(u32, pi)
+ __field(enum hns_roce_trace_type, type)
+ ),
+
+ TP_fast_assign(__entry->qpn = qpn;
+ __entry->pi = pi;
+ __entry->type = type;
+ ),
+
+ TP_printk("%s 0x%lx flush head 0x%x.",
+ trace_type_to_str(__entry->type),
+ __entry->qpn, __entry->pi)
+);
+
+DEFINE_EVENT(flush_head_template, hns_sq_flush_cqe,
+ TP_PROTO(unsigned long qpn, u32 pi,
+ enum hns_roce_trace_type type),
+ TP_ARGS(qpn, pi, type));
+DEFINE_EVENT(flush_head_template, hns_rq_flush_cqe,
+ TP_PROTO(unsigned long qpn, u32 pi,
+ enum hns_roce_trace_type type),
+ TP_ARGS(qpn, pi, type));
+
+#define MAX_SGE_PER_WQE 64
+#define MAX_WQE_SIZE (MAX_SGE_PER_WQE * HNS_ROCE_SGE_SIZE)
+DECLARE_EVENT_CLASS(wqe_template,
+ TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len,
+ u64 id, enum hns_roce_trace_type type),
+ TP_ARGS(qpn, idx, wqe, len, id, type),
+
+ TP_STRUCT__entry(__field(unsigned long, qpn)
+ __field(u32, idx)
+ __array(u32, wqe,
+ MAX_WQE_SIZE / sizeof(__le32))
+ __field(u32, len)
+ __field(u64, id)
+ __field(enum hns_roce_trace_type, type)
+ ),
+
+ TP_fast_assign(__entry->qpn = qpn;
+ __entry->idx = idx;
+ __entry->id = id;
+ __entry->len = len / sizeof(__le32);
+ __entry->type = type;
+ for (int i = 0; i < __entry->len; i++)
+ __entry->wqe[i] = le32_to_cpu(((__le32 *)wqe)[i]);
+ ),
+
+ TP_printk("%s 0x%lx wqe(0x%x/0x%llx): %s",
+ trace_type_to_str(__entry->type),
+ __entry->qpn, __entry->idx, __entry->id,
+ __print_array(__entry->wqe, __entry->len,
+ sizeof(__le32)))
+);
+
+DEFINE_EVENT(wqe_template, hns_sq_wqe,
+ TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len, u64 id,
+ enum hns_roce_trace_type type),
+ TP_ARGS(qpn, idx, wqe, len, id, type));
+DEFINE_EVENT(wqe_template, hns_rq_wqe,
+ TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len, u64 id,
+ enum hns_roce_trace_type type),
+ TP_ARGS(qpn, idx, wqe, len, id, type));
+DEFINE_EVENT(wqe_template, hns_srq_wqe,
+ TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len, u64 id,
+ enum hns_roce_trace_type type),
+ TP_ARGS(qpn, idx, wqe, len, id, type));
+
+TRACE_EVENT(hns_ae_info,
+ TP_PROTO(int event_type, void *aeqe, unsigned int len),
+ TP_ARGS(event_type, aeqe, len),
+
+ TP_STRUCT__entry(__field(int, event_type)
+ __array(u32, aeqe,
+ HNS_ROCE_V3_EQE_SIZE / sizeof(__le32))
+ __field(u32, len)
+ ),
+
+ TP_fast_assign(__entry->event_type = event_type;
+ __entry->len = len / sizeof(__le32);
+ for (int i = 0; i < __entry->len; i++)
+ __entry->aeqe[i] = le32_to_cpu(((__le32 *)aeqe)[i]);
+ ),
+
+ TP_printk("event %2d aeqe: %s", __entry->event_type,
+ __print_array(__entry->aeqe, __entry->len, sizeof(__le32)))
+);
+
+TRACE_EVENT(hns_mr,
+ TP_PROTO(struct hns_roce_mr *mr),
+ TP_ARGS(mr),
+
+ TP_STRUCT__entry(__field(u64, iova)
+ __field(u64, size)
+ __field(u32, key)
+ __field(u32, pd)
+ __field(u32, pbl_hop_num)
+ __field(u32, npages)
+ __field(int, type)
+ __field(int, enabled)
+ ),
+
+ TP_fast_assign(__entry->iova = mr->iova;
+ __entry->size = mr->size;
+ __entry->key = mr->key;
+ __entry->pd = mr->pd;
+ __entry->pbl_hop_num = mr->pbl_hop_num;
+ __entry->npages = mr->npages;
+ __entry->type = mr->type;
+ __entry->enabled = mr->enabled;
+ ),
+
+ TP_printk("iova:0x%llx, size:%llu, key:%u, pd:%u, pbl_hop:%u, npages:%u, type:%d, status:%d",
+ __entry->iova, __entry->size, __entry->key,
+ __entry->pd, __entry->pbl_hop_num, __entry->npages,
+ __entry->type, __entry->enabled)
+);
+
+TRACE_EVENT(hns_buf_attr,
+ TP_PROTO(struct hns_roce_buf_attr *attr),
+ TP_ARGS(attr),
+
+ TP_STRUCT__entry(__field(unsigned int, region_count)
+ __field(unsigned int, region0_size)
+ __field(int, region0_hopnum)
+ __field(unsigned int, region1_size)
+ __field(int, region1_hopnum)
+ __field(unsigned int, region2_size)
+ __field(int, region2_hopnum)
+ __field(unsigned int, page_shift)
+ __field(bool, mtt_only)
+ ),
+
+ TP_fast_assign(__entry->region_count = attr->region_count;
+ __entry->region0_size = attr->region[0].size;
+ __entry->region0_hopnum = attr->region[0].hopnum;
+ __entry->region1_size = attr->region[1].size;
+ __entry->region1_hopnum = attr->region[1].hopnum;
+ __entry->region2_size = attr->region[2].size;
+ __entry->region2_hopnum = attr->region[2].hopnum;
+ __entry->page_shift = attr->page_shift;
+ __entry->mtt_only = attr->mtt_only;
+ ),
+
+ TP_printk("rg cnt:%u, pg_sft:0x%x, mtt_only:%s, rg 0 (sz:%u, hop:%u), rg 1 (sz:%u, hop:%u), rg 2 (sz:%u, hop:%u)\n",
+ __entry->region_count, __entry->page_shift,
+ str_yes_no(__entry->mtt_only),
+ __entry->region0_size, __entry->region0_hopnum,
+ __entry->region1_size, __entry->region1_hopnum,
+ __entry->region2_size, __entry->region2_hopnum)
+);
+
+DECLARE_EVENT_CLASS(cmdq,
+ TP_PROTO(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc),
+ TP_ARGS(hr_dev, desc),
+
+ TP_STRUCT__entry(__string(dev_name, dev_name(hr_dev->dev))
+ __field(u16, opcode)
+ __field(u16, flag)
+ __field(u16, retval)
+ __array(u32, data, 6)
+ ),
+
+ TP_fast_assign(__assign_str(dev_name);
+ __entry->opcode = le16_to_cpu(desc->opcode);
+ __entry->flag = le16_to_cpu(desc->flag);
+ __entry->retval = le16_to_cpu(desc->retval);
+ for (int i = 0; i < 6; i++)
+ __entry->data[i] = le32_to_cpu(desc->data[i]);
+ ),
+
+ TP_printk("%s cmdq opcode:0x%x, flag:0x%x, retval:0x%x, data:%s\n",
+ __get_str(dev_name), __entry->opcode,
+ __entry->flag, __entry->retval,
+ __print_array(__entry->data, 6, sizeof(__le32)))
+);
+
+DEFINE_EVENT(cmdq, hns_cmdq_req,
+ TP_PROTO(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc),
+ TP_ARGS(hr_dev, desc));
+DEFINE_EVENT(cmdq, hns_cmdq_resp,
+ TP_PROTO(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc),
+ TP_ARGS(hr_dev, desc));
+
+#endif /* __HNS_ROCE_TRACE_H */
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE hns_roce_trace
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c
index 6aed6169c07d..99a7f1a6c0b5 100644
--- a/drivers/infiniband/hw/irdma/ctrl.c
+++ b/drivers/infiniband/hw/irdma/ctrl.c
@@ -3131,7 +3131,7 @@ int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
writel(0, cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
ibdev_dbg(to_ibdev(cqp->dev),
- "WQE: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%pK] cqp[%p] polarity[x%04x]\n",
+ "WQE: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%p] cqp[%p] polarity[x%04x]\n",
cqp->sq_size, cqp->hw_sq_size, cqp->sq_base,
(u64 *)(uintptr_t)cqp->sq_pa, cqp, cqp->polarity);
return 0;
diff --git a/drivers/infiniband/hw/irdma/pble.c b/drivers/infiniband/hw/irdma/pble.c
index e7ce6840755f..37ce35cb10e7 100644
--- a/drivers/infiniband/hw/irdma/pble.c
+++ b/drivers/infiniband/hw/irdma/pble.c
@@ -108,7 +108,7 @@ static int add_sd_direct(struct irdma_hmc_pble_rsrc *pble_rsrc,
chunk->vaddr = sd_entry->u.bp.addr.va + offset;
chunk->fpm_addr = pble_rsrc->next_fpm_addr;
ibdev_dbg(to_ibdev(dev),
- "PBLE: chunk_size[%lld] = 0x%llx vaddr=0x%pK fpm_addr = %llx\n",
+ "PBLE: chunk_size[%lld] = 0x%llx vaddr=0x%p fpm_addr = %llx\n",
chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr);
return 0;
diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c
index 0fc4e2679218..28e154bbb50f 100644
--- a/drivers/infiniband/hw/mana/cq.c
+++ b/drivers/infiniband/hw/mana/cq.c
@@ -15,14 +15,12 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_device *ibdev = ibcq->device;
struct mana_ib_create_cq ucmd = {};
struct mana_ib_dev *mdev;
- struct gdma_context *gc;
bool is_rnic_cq;
u32 doorbell;
u32 buf_size;
int err;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
- gc = mdev_to_gc(mdev);
cq->comp_vector = attr->comp_vector % ibdev->num_comp_vectors;
cq->cq_handle = INVALID_MANA_HANDLE;
@@ -65,7 +63,7 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
ibdev_dbg(ibdev, "Failed to create kernel queue for create cq, %d\n", err);
return err;
}
- doorbell = gc->mana_ib.doorbell;
+ doorbell = mdev->gdma_dev->doorbell;
}
if (is_rnic_cq) {
diff --git a/drivers/infiniband/hw/mana/device.c b/drivers/infiniband/hw/mana/device.c
index b31089320aa5..165c0a1e67d1 100644
--- a/drivers/infiniband/hw/mana/device.c
+++ b/drivers/infiniband/hw/mana/device.c
@@ -101,103 +101,95 @@ static int mana_ib_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
struct mana_adev *madev = container_of(adev, struct mana_adev, adev);
+ struct gdma_context *gc = madev->mdev->gdma_context;
+ struct mana_context *mc = gc->mana.driver_data;
struct gdma_dev *mdev = madev->mdev;
struct net_device *ndev;
- struct mana_context *mc;
struct mana_ib_dev *dev;
u8 mac_addr[ETH_ALEN];
int ret;
- mc = mdev->driver_data;
-
dev = ib_alloc_device(mana_ib_dev, ib_dev);
if (!dev)
return -ENOMEM;
ib_set_device_ops(&dev->ib_dev, &mana_ib_dev_ops);
-
- dev->ib_dev.phys_port_cnt = mc->num_ports;
-
- ibdev_dbg(&dev->ib_dev, "mdev=%p id=%d num_ports=%d\n", mdev,
- mdev->dev_id.as_uint32, dev->ib_dev.phys_port_cnt);
-
dev->ib_dev.node_type = RDMA_NODE_IB_CA;
-
- /*
- * num_comp_vectors needs to set to the max MSIX index
- * when interrupts and event queues are implemented
- */
- dev->ib_dev.num_comp_vectors = mdev->gdma_context->max_num_queues;
- dev->ib_dev.dev.parent = mdev->gdma_context->dev;
-
- ndev = mana_get_primary_netdev(mc, 0, &dev->dev_tracker);
- if (!ndev) {
- ret = -ENODEV;
- ibdev_err(&dev->ib_dev, "Failed to get netdev for IB port 1");
- goto free_ib_device;
- }
- ether_addr_copy(mac_addr, ndev->dev_addr);
- addrconf_addr_eui48((u8 *)&dev->ib_dev.node_guid, ndev->dev_addr);
- ret = ib_device_set_netdev(&dev->ib_dev, ndev, 1);
- /* mana_get_primary_netdev() returns ndev with refcount held */
- netdev_put(ndev, &dev->dev_tracker);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to set ib netdev, ret %d", ret);
- goto free_ib_device;
- }
-
- ret = mana_gd_register_device(&mdev->gdma_context->mana_ib);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to register device, ret %d",
- ret);
- goto free_ib_device;
- }
- dev->gdma_dev = &mdev->gdma_context->mana_ib;
-
- dev->nb.notifier_call = mana_ib_netdev_event;
- ret = register_netdevice_notifier(&dev->nb);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to register net notifier, %d",
- ret);
- goto deregister_device;
- }
-
- ret = mana_ib_gd_query_adapter_caps(dev);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to query device caps, ret %d",
- ret);
- goto deregister_net_notifier;
- }
-
- ib_set_device_ops(&dev->ib_dev, &mana_ib_stats_ops);
-
- ret = mana_ib_create_eqs(dev);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to create EQs, ret %d", ret);
- goto deregister_net_notifier;
- }
-
- ret = mana_ib_gd_create_rnic_adapter(dev);
- if (ret)
- goto destroy_eqs;
-
+ dev->ib_dev.num_comp_vectors = gc->max_num_queues;
+ dev->ib_dev.dev.parent = gc->dev;
+ dev->gdma_dev = mdev;
xa_init_flags(&dev->qp_table_wq, XA_FLAGS_LOCK_IRQ);
- ret = mana_ib_gd_config_mac(dev, ADDR_OP_ADD, mac_addr);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to add Mac address, ret %d",
- ret);
- goto destroy_rnic;
+
+ if (mana_ib_is_rnic(dev)) {
+ dev->ib_dev.phys_port_cnt = 1;
+ ndev = mana_get_primary_netdev(mc, 0, &dev->dev_tracker);
+ if (!ndev) {
+ ret = -ENODEV;
+ ibdev_err(&dev->ib_dev, "Failed to get netdev for IB port 1");
+ goto free_ib_device;
+ }
+ ether_addr_copy(mac_addr, ndev->dev_addr);
+ addrconf_addr_eui48((u8 *)&dev->ib_dev.node_guid, ndev->dev_addr);
+ ret = ib_device_set_netdev(&dev->ib_dev, ndev, 1);
+ /* mana_get_primary_netdev() returns ndev with refcount held */
+ netdev_put(ndev, &dev->dev_tracker);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to set ib netdev, ret %d", ret);
+ goto free_ib_device;
+ }
+
+ dev->nb.notifier_call = mana_ib_netdev_event;
+ ret = register_netdevice_notifier(&dev->nb);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to register net notifier, %d",
+ ret);
+ goto free_ib_device;
+ }
+
+ ret = mana_ib_gd_query_adapter_caps(dev);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to query device caps, ret %d", ret);
+ goto deregister_net_notifier;
+ }
+
+ ib_set_device_ops(&dev->ib_dev, &mana_ib_stats_ops);
+
+ ret = mana_ib_create_eqs(dev);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to create EQs, ret %d", ret);
+ goto deregister_net_notifier;
+ }
+
+ ret = mana_ib_gd_create_rnic_adapter(dev);
+ if (ret)
+ goto destroy_eqs;
+
+ ret = mana_ib_gd_config_mac(dev, ADDR_OP_ADD, mac_addr);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to add Mac address, ret %d", ret);
+ goto destroy_rnic;
+ }
+ } else {
+ dev->ib_dev.phys_port_cnt = mc->num_ports;
+ ret = mana_eth_query_adapter_caps(dev);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to query ETH device caps, ret %d", ret);
+ goto free_ib_device;
+ }
}
- dev->av_pool = dma_pool_create("mana_ib_av", mdev->gdma_context->dev,
- MANA_AV_BUFFER_SIZE, MANA_AV_BUFFER_SIZE, 0);
+ dev->av_pool = dma_pool_create("mana_ib_av", gc->dev, MANA_AV_BUFFER_SIZE,
+ MANA_AV_BUFFER_SIZE, 0);
if (!dev->av_pool) {
ret = -ENOMEM;
goto destroy_rnic;
}
- ret = ib_register_device(&dev->ib_dev, "mana_%d",
- mdev->gdma_context->dev);
+ ibdev_dbg(&dev->ib_dev, "mdev=%p id=%d num_ports=%d\n", mdev,
+ mdev->dev_id.as_uint32, dev->ib_dev.phys_port_cnt);
+
+ ret = ib_register_device(&dev->ib_dev, mana_ib_is_rnic(dev) ? "mana_%d" : "manae_%d",
+ gc->dev);
if (ret)
goto deallocate_pool;
@@ -208,15 +200,16 @@ static int mana_ib_probe(struct auxiliary_device *adev,
deallocate_pool:
dma_pool_destroy(dev->av_pool);
destroy_rnic:
- xa_destroy(&dev->qp_table_wq);
- mana_ib_gd_destroy_rnic_adapter(dev);
+ if (mana_ib_is_rnic(dev))
+ mana_ib_gd_destroy_rnic_adapter(dev);
destroy_eqs:
- mana_ib_destroy_eqs(dev);
+ if (mana_ib_is_rnic(dev))
+ mana_ib_destroy_eqs(dev);
deregister_net_notifier:
- unregister_netdevice_notifier(&dev->nb);
-deregister_device:
- mana_gd_deregister_device(dev->gdma_dev);
+ if (mana_ib_is_rnic(dev))
+ unregister_netdevice_notifier(&dev->nb);
free_ib_device:
+ xa_destroy(&dev->qp_table_wq);
ib_dealloc_device(&dev->ib_dev);
return ret;
}
@@ -227,25 +220,24 @@ static void mana_ib_remove(struct auxiliary_device *adev)
ib_unregister_device(&dev->ib_dev);
dma_pool_destroy(dev->av_pool);
+ if (mana_ib_is_rnic(dev)) {
+ mana_ib_gd_destroy_rnic_adapter(dev);
+ mana_ib_destroy_eqs(dev);
+ unregister_netdevice_notifier(&dev->nb);
+ }
xa_destroy(&dev->qp_table_wq);
- mana_ib_gd_destroy_rnic_adapter(dev);
- mana_ib_destroy_eqs(dev);
- unregister_netdevice_notifier(&dev->nb);
- mana_gd_deregister_device(dev->gdma_dev);
ib_dealloc_device(&dev->ib_dev);
}
static const struct auxiliary_device_id mana_id_table[] = {
- {
- .name = "mana.rdma",
- },
+ { .name = "mana.rdma", },
+ { .name = "mana.eth", },
{},
};
MODULE_DEVICE_TABLE(auxiliary, mana_id_table);
static struct auxiliary_driver mana_driver = {
- .name = "rdma",
.probe = mana_ib_probe,
.remove = mana_ib_remove,
.id_table = mana_id_table,
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index eda9c5b971de..41a24a186f9d 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -4,6 +4,7 @@
*/
#include "mana_ib.h"
+#include "linux/pci.h"
void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
u32 port)
@@ -243,7 +244,6 @@ void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
int mana_ib_create_kernel_queue(struct mana_ib_dev *mdev, u32 size, enum gdma_queue_type type,
struct mana_ib_queue *queue)
{
- struct gdma_context *gc = mdev_to_gc(mdev);
struct gdma_queue_spec spec = {};
int err;
@@ -252,7 +252,7 @@ int mana_ib_create_kernel_queue(struct mana_ib_dev *mdev, u32 size, enum gdma_qu
spec.type = type;
spec.monitor_avl_buf = false;
spec.queue_size = size;
- err = mana_gd_create_mana_wq_cq(&gc->mana_ib, &spec, &queue->kmem);
+ err = mana_gd_create_mana_wq_cq(mdev->gdma_dev, &spec, &queue->kmem);
if (err)
return err;
/* take ownership into mana_ib from mana */
@@ -479,7 +479,7 @@ int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
{
unsigned long page_sz;
- page_sz = ib_umem_find_best_pgsz(umem, PAGE_SZ_BM, virt);
+ page_sz = ib_umem_find_best_pgsz(umem, dev->adapter_caps.page_size_cap, virt);
if (!page_sz) {
ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
return -EINVAL;
@@ -494,7 +494,7 @@ int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_ume
unsigned long page_sz;
/* Hardware requires dma region to align to chosen page size */
- page_sz = ib_umem_find_best_pgoff(umem, PAGE_SZ_BM, 0);
+ page_sz = ib_umem_find_best_pgoff(umem, dev->adapter_caps.page_size_cap, 0);
if (!page_sz) {
ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
return -EINVAL;
@@ -551,6 +551,7 @@ int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
+ struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
struct ib_port_attr attr;
int err;
@@ -560,10 +561,12 @@ int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
immutable->pkey_tbl_len = attr.pkey_tbl_len;
immutable->gid_tbl_len = attr.gid_tbl_len;
- immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
- if (port_num == 1) {
- immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+
+ if (mana_ib_is_rnic(dev)) {
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ } else {
+ immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
}
return 0;
@@ -572,12 +575,14 @@ int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
struct ib_udata *uhw)
{
- struct mana_ib_dev *dev = container_of(ibdev,
- struct mana_ib_dev, ib_dev);
+ struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+ struct pci_dev *pdev = to_pci_dev(mdev_to_gc(dev)->dev);
memset(props, 0, sizeof(*props));
+ props->vendor_id = pdev->vendor;
+ props->vendor_part_id = dev->gdma_dev->dev_id.type;
props->max_mr_size = MANA_IB_MAX_MR_SIZE;
- props->page_size_cap = PAGE_SZ_BM;
+ props->page_size_cap = dev->adapter_caps.page_size_cap;
props->max_qp = dev->adapter_caps.max_qp_count;
props->max_qp_wr = dev->adapter_caps.max_qp_wr;
props->device_cap_flags = IB_DEVICE_RC_RNR_NAK_GEN;
@@ -596,6 +601,8 @@ int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
props->max_ah = INT_MAX;
props->max_pkeys = 1;
props->local_ca_ack_delay = MANA_CA_ACK_DELAY;
+ if (!mana_ib_is_rnic(dev))
+ props->raw_packet_caps = IB_RAW_PACKET_CAP_IP_CSUM;
return 0;
}
@@ -603,6 +610,7 @@ int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
int mana_ib_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
+ struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
struct net_device *ndev = mana_ib_get_netdev(ibdev, port);
if (!ndev)
@@ -623,7 +631,7 @@ int mana_ib_query_port(struct ib_device *ibdev, u32 port,
props->active_width = IB_WIDTH_4X;
props->active_speed = IB_SPEED_EDR;
props->pkey_tbl_len = 1;
- if (port == 1) {
+ if (mana_ib_is_rnic(dev)) {
props->gid_tbl_len = 16;
props->port_cap_flags = IB_PORT_CM_SUP;
props->ip_gids = true;
@@ -696,6 +704,41 @@ int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *dev)
caps->max_recv_sge_count = resp.max_recv_sge_count;
caps->feature_flags = resp.feature_flags;
+ caps->page_size_cap = PAGE_SZ_BM;
+ if (mdev_to_gc(dev)->pf_cap_flags1 & GDMA_DRV_CAP_FLAG_1_GDMA_PAGES_4MB_1GB_2GB)
+ caps->page_size_cap |= (SZ_4M | SZ_1G | SZ_2G);
+
+ return 0;
+}
+
+int mana_eth_query_adapter_caps(struct mana_ib_dev *dev)
+{
+ struct mana_ib_adapter_caps *caps = &dev->adapter_caps;
+ struct gdma_query_max_resources_resp resp = {};
+ struct gdma_general_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
+ sizeof(req), sizeof(resp));
+
+ err = mana_gd_send_request(mdev_to_gc(dev), sizeof(req), &req, sizeof(resp), &resp);
+ if (err) {
+ ibdev_err(&dev->ib_dev,
+ "Failed to query adapter caps err %d", err);
+ return err;
+ }
+
+ caps->max_qp_count = min_t(u32, resp.max_sq, resp.max_rq);
+ caps->max_cq_count = resp.max_cq;
+ caps->max_mr_count = resp.max_mst;
+ caps->max_pd_count = 0x6000;
+ caps->max_qp_wr = min_t(u32,
+ 0x100000 / GDMA_MAX_SQE_SIZE,
+ 0x100000 / GDMA_MAX_RQE_SIZE);
+ caps->max_send_sge_count = 30;
+ caps->max_recv_sge_count = 15;
+ caps->page_size_cap = PAGE_SZ_BM;
+
return 0;
}
@@ -740,7 +783,7 @@ int mana_ib_create_eqs(struct mana_ib_dev *mdev)
spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
spec.eq.msix_index = 0;
- err = mana_gd_create_mana_eq(&gc->mana_ib, &spec, &mdev->fatal_err_eq);
+ err = mana_gd_create_mana_eq(mdev->gdma_dev, &spec, &mdev->fatal_err_eq);
if (err)
return err;
@@ -791,7 +834,7 @@ int mana_ib_gd_create_rnic_adapter(struct mana_ib_dev *mdev)
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_ADAPTER, sizeof(req), sizeof(resp));
req.hdr.req.msg_version = GDMA_MESSAGE_V2;
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.notify_eq_id = mdev->fatal_err_eq->id;
if (mdev->adapter_caps.feature_flags & MANA_IB_FEATURE_CLIENT_ERROR_CQE_SUPPORT)
@@ -816,7 +859,7 @@ int mana_ib_gd_destroy_rnic_adapter(struct mana_ib_dev *mdev)
gc = mdev_to_gc(mdev);
mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_ADAPTER, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
@@ -843,7 +886,7 @@ int mana_ib_gd_add_gid(const struct ib_gid_attr *attr, void **context)
}
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_IP_ADDR, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.op = ADDR_OP_ADD;
req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
@@ -873,7 +916,7 @@ int mana_ib_gd_del_gid(const struct ib_gid_attr *attr, void **context)
}
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_IP_ADDR, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.op = ADDR_OP_REMOVE;
req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
@@ -896,7 +939,7 @@ int mana_ib_gd_config_mac(struct mana_ib_dev *mdev, enum mana_ib_addr_op op, u8
int err;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_MAC_ADDR, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.op = op;
copy_in_reverse(req.mac_addr, mac, ETH_ALEN);
@@ -917,8 +960,11 @@ int mana_ib_gd_create_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq, u32 do
struct mana_rnic_create_cq_req req = {};
int err;
+ if (!mdev->eqs)
+ return -EINVAL;
+
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_CQ, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.gdma_region = cq->queue.gdma_region;
req.eq_id = mdev->eqs[cq->comp_vector]->id;
@@ -950,7 +996,7 @@ int mana_ib_gd_destroy_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
return 0;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_CQ, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.cq_handle = cq->cq_handle;
@@ -976,7 +1022,7 @@ int mana_ib_gd_create_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
int err, i;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_RC_QP, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.pd_handle = pd->pd_handle;
req.send_cq_handle = send_cq->cq_handle;
@@ -1012,7 +1058,7 @@ int mana_ib_gd_destroy_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
int err;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_RC_QP, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.rc_qp_handle = qp->qp_handle;
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
@@ -1035,7 +1081,7 @@ int mana_ib_gd_create_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
int err, i;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_UD_QP, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.pd_handle = pd->pd_handle;
req.send_cq_handle = send_cq->cq_handle;
@@ -1070,7 +1116,7 @@ int mana_ib_gd_destroy_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
int err;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_UD_QP, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.qp_handle = qp->qp_handle;
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index 6903946677e5..42bebd6cd4f7 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -60,6 +60,7 @@ struct mana_ib_adapter_caps {
u32 max_recv_sge_count;
u32 max_inline_data_size;
u64 feature_flags;
+ u64 page_size_cap;
};
struct mana_ib_queue {
@@ -543,6 +544,11 @@ static inline void mana_put_qp_ref(struct mana_ib_qp *qp)
complete(&qp->free);
}
+static inline bool mana_ib_is_rnic(struct mana_ib_dev *mdev)
+{
+ return mdev->gdma_dev->dev_id.type == GDMA_DEVICE_MANA_IB;
+}
+
static inline struct net_device *mana_ib_get_netdev(struct ib_device *ibdev, u32 port)
{
struct mana_ib_dev *mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
@@ -642,6 +648,7 @@ int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext);
int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *mdev);
+int mana_eth_query_adapter_caps(struct mana_ib_dev *mdev);
int mana_ib_create_eqs(struct mana_ib_dev *mdev);
diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c
index f99557ec7767..6d974d0a8400 100644
--- a/drivers/infiniband/hw/mana/mr.c
+++ b/drivers/infiniband/hw/mana/mr.c
@@ -5,8 +5,8 @@
#include "mana_ib.h"
-#define VALID_MR_FLAGS \
- (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ)
+#define VALID_MR_FLAGS (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |\
+ IB_ACCESS_REMOTE_ATOMIC | IB_ZERO_BASED)
#define VALID_DMA_MR_FLAGS (IB_ACCESS_LOCAL_WRITE)
@@ -24,6 +24,9 @@ mana_ib_verbs_to_gdma_access_flags(int access_flags)
if (access_flags & IB_ACCESS_REMOTE_READ)
flags |= GDMA_ACCESS_FLAG_REMOTE_READ;
+ if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
+ flags |= GDMA_ACCESS_FLAG_REMOTE_ATOMIC;
+
return flags;
}
@@ -48,7 +51,10 @@ static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr,
req.gva.virtual_address = mr_params->gva.virtual_address;
req.gva.access_flags = mr_params->gva.access_flags;
break;
-
+ case GDMA_MR_TYPE_ZBVA:
+ req.zbva.dma_region_handle = mr_params->zbva.dma_region_handle;
+ req.zbva.access_flags = mr_params->zbva.access_flags;
+ break;
default:
ibdev_dbg(&dev->ib_dev,
"invalid param (GDMA_MR_TYPE) passed, type %d\n",
@@ -144,11 +150,18 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
dma_region_handle);
mr_params.pd_handle = pd->pd_handle;
- mr_params.mr_type = GDMA_MR_TYPE_GVA;
- mr_params.gva.dma_region_handle = dma_region_handle;
- mr_params.gva.virtual_address = iova;
- mr_params.gva.access_flags =
- mana_ib_verbs_to_gdma_access_flags(access_flags);
+ if (access_flags & IB_ZERO_BASED) {
+ mr_params.mr_type = GDMA_MR_TYPE_ZBVA;
+ mr_params.zbva.dma_region_handle = dma_region_handle;
+ mr_params.zbva.access_flags =
+ mana_ib_verbs_to_gdma_access_flags(access_flags);
+ } else {
+ mr_params.mr_type = GDMA_MR_TYPE_GVA;
+ mr_params.gva.dma_region_handle = dma_region_handle;
+ mr_params.gva.virtual_address = iova;
+ mr_params.gva.access_flags =
+ mana_ib_verbs_to_gdma_access_flags(access_flags);
+ }
err = mana_ib_gd_create_mr(dev, mr, &mr_params);
if (err)
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index c928af58f38b..14fd7d6c54a2 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -635,7 +635,6 @@ static int mana_ib_create_ud_qp(struct ib_qp *ibqp, struct ib_pd *ibpd,
{
struct mana_ib_dev *mdev = container_of(ibpd->device, struct mana_ib_dev, ib_dev);
struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
- struct gdma_context *gc = mdev_to_gc(mdev);
u32 doorbell, queue_size;
int i, err;
@@ -654,7 +653,7 @@ static int mana_ib_create_ud_qp(struct ib_qp *ibqp, struct ib_pd *ibpd,
goto destroy_queues;
}
}
- doorbell = gc->mana_ib.doorbell;
+ doorbell = mdev->gdma_dev->doorbell;
err = create_shadow_queue(&qp->shadow_rq, attr->cap.max_recv_wr,
sizeof(struct ud_rq_shadow_wqe));
@@ -736,7 +735,7 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int err;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_SET_QP_STATE, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.qp_handle = qp->qp_handle;
req.qp_state = attr->qp_state;
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index 33f525b744f2..e279e69b9a51 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -43,7 +43,7 @@
#define MAX_VFS 80
#define MAX_PEND_REQS_PER_FUNC 4
-#define MAD_TIMEOUT_MS 2000
+#define MAD_TIMEOUT_SEC 2
#define mcg_warn(fmt, arg...) pr_warn("MCG WARNING: " fmt, ##arg)
#define mcg_error(fmt, arg...) pr_err(fmt, ##arg)
@@ -270,7 +270,7 @@ static int send_join_to_wire(struct mcast_group *group, struct ib_sa_mad *sa_mad
if (!ret) {
/* calls mlx4_ib_mcg_timeout_handler */
queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
- msecs_to_jiffies(MAD_TIMEOUT_MS));
+ secs_to_jiffies(MAD_TIMEOUT_SEC));
}
return ret;
@@ -309,7 +309,7 @@ static int send_leave_to_wire(struct mcast_group *group, u8 join_state)
if (!ret) {
/* calls mlx4_ib_mcg_timeout_handler */
queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
- msecs_to_jiffies(MAD_TIMEOUT_MS));
+ secs_to_jiffies(MAD_TIMEOUT_SEC));
}
return ret;
@@ -1091,7 +1091,7 @@ static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy
for (i = 0; i < MAX_VFS; ++i)
clean_vf_mcast(ctx, i);
- end = jiffies + msecs_to_jiffies(MAD_TIMEOUT_MS + 3000);
+ end = jiffies + secs_to_jiffies(MAD_TIMEOUT_SEC + 3);
do {
count = 0;
mutex_lock(&ctx->mcg_table_lock);
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index 0ff9f18a71e8..680627f1de33 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -1645,11 +1645,6 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0, NULL);
}
-enum {
- LEFTOVERS_MC,
- LEFTOVERS_UC,
-};
-
static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev,
struct mlx5_ib_flow_prio *ft_prio,
struct ib_flow_attr *flow_attr,
@@ -1659,43 +1654,32 @@ static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *de
struct mlx5_ib_flow_handler *handler = NULL;
static struct {
- struct ib_flow_attr flow_attr;
struct ib_flow_spec_eth eth_flow;
- } leftovers_specs[] = {
- [LEFTOVERS_MC] = {
- .flow_attr = {
- .num_of_specs = 1,
- .size = sizeof(leftovers_specs[0])
- },
- .eth_flow = {
- .type = IB_FLOW_SPEC_ETH,
- .size = sizeof(struct ib_flow_spec_eth),
- .mask = {.dst_mac = {0x1} },
- .val = {.dst_mac = {0x1} }
- }
- },
- [LEFTOVERS_UC] = {
- .flow_attr = {
- .num_of_specs = 1,
- .size = sizeof(leftovers_specs[0])
- },
- .eth_flow = {
- .type = IB_FLOW_SPEC_ETH,
- .size = sizeof(struct ib_flow_spec_eth),
- .mask = {.dst_mac = {0x1} },
- .val = {.dst_mac = {} }
- }
- }
- };
+ struct ib_flow_attr flow_attr;
+ } leftovers_wc = { .flow_attr = { .num_of_specs = 1,
+ .size = sizeof(leftovers_wc) },
+ .eth_flow = {
+ .type = IB_FLOW_SPEC_ETH,
+ .size = sizeof(struct ib_flow_spec_eth),
+ .mask = { .dst_mac = { 0x1 } },
+ .val = { .dst_mac = { 0x1 } } } };
- handler = create_flow_rule(dev, ft_prio,
- &leftovers_specs[LEFTOVERS_MC].flow_attr,
- dst);
+ static struct {
+ struct ib_flow_spec_eth eth_flow;
+ struct ib_flow_attr flow_attr;
+ } leftovers_uc = { .flow_attr = { .num_of_specs = 1,
+ .size = sizeof(leftovers_uc) },
+ .eth_flow = {
+ .type = IB_FLOW_SPEC_ETH,
+ .size = sizeof(struct ib_flow_spec_eth),
+ .mask = { .dst_mac = { 0x1 } },
+ .val = { .dst_mac = {} } } };
+
+ handler = create_flow_rule(dev, ft_prio, &leftovers_wc.flow_attr, dst);
if (!IS_ERR(handler) &&
flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) {
handler_ucast = create_flow_rule(dev, ft_prio,
- &leftovers_specs[LEFTOVERS_UC].flow_attr,
- dst);
+ &leftovers_uc.flow_attr, dst);
if (IS_ERR(handler_ucast)) {
mlx5_del_flow_rules(handler->rule);
ft_prio->refcount--;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index d07cacaa0abd..ce7610740412 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -485,6 +485,10 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed,
*active_width = IB_WIDTH_2X;
*active_speed = IB_SPEED_NDR;
break;
+ case MLX5E_PROT_MASK(MLX5E_200GAUI_1_200GBASE_CR1_KR1):
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_XDR;
+ break;
case MLX5E_PROT_MASK(MLX5E_400GAUI_8_400GBASE_CR8):
*active_width = IB_WIDTH_8X;
*active_speed = IB_SPEED_HDR;
@@ -493,10 +497,18 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed,
*active_width = IB_WIDTH_4X;
*active_speed = IB_SPEED_NDR;
break;
+ case MLX5E_PROT_MASK(MLX5E_400GAUI_2_400GBASE_CR2_KR2):
+ *active_width = IB_WIDTH_2X;
+ *active_speed = IB_SPEED_XDR;
+ break;
case MLX5E_PROT_MASK(MLX5E_800GAUI_8_800GBASE_CR8_KR8):
*active_width = IB_WIDTH_8X;
*active_speed = IB_SPEED_NDR;
break;
+ case MLX5E_PROT_MASK(MLX5E_800GAUI_4_800GBASE_CR4_KR4):
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_XDR;
+ break;
default:
return -EINVAL;
}
@@ -4422,17 +4434,6 @@ static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
mlx5_core_native_port_num(dev->mdev) - 1);
}
-static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev)
-{
- dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
- return PTR_ERR_OR_ZERO(dev->mdev->priv.uar);
-}
-
-static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
-{
- mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
-}
-
static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
{
int err;
@@ -4662,9 +4663,6 @@ static const struct mlx5_ib_profile pf_profile = {
STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
mlx5_ib_stage_cong_debugfs_init,
mlx5_ib_stage_cong_debugfs_cleanup),
- STAGE_CREATE(MLX5_IB_STAGE_UAR,
- mlx5_ib_stage_uar_init,
- mlx5_ib_stage_uar_cleanup),
STAGE_CREATE(MLX5_IB_STAGE_BFREG,
mlx5_ib_stage_bfrag_init,
mlx5_ib_stage_bfrag_cleanup),
@@ -4722,9 +4720,6 @@ const struct mlx5_ib_profile raw_eth_profile = {
STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
mlx5_ib_stage_cong_debugfs_init,
mlx5_ib_stage_cong_debugfs_cleanup),
- STAGE_CREATE(MLX5_IB_STAGE_UAR,
- mlx5_ib_stage_uar_init,
- mlx5_ib_stage_uar_cleanup),
STAGE_CREATE(MLX5_IB_STAGE_BFREG,
mlx5_ib_stage_bfrag_init,
mlx5_ib_stage_bfrag_cleanup),
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index ace2df3e1d9f..fde859d207ae 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -351,6 +351,7 @@ struct mlx5_ib_flow_db {
#define MLX5_IB_UPD_XLT_PD BIT(4)
#define MLX5_IB_UPD_XLT_ACCESS BIT(5)
#define MLX5_IB_UPD_XLT_INDIRECT BIT(6)
+#define MLX5_IB_UPD_XLT_DOWNGRADE BIT(7)
/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
*
@@ -1005,7 +1006,6 @@ enum mlx5_ib_stages {
MLX5_IB_STAGE_ODP,
MLX5_IB_STAGE_COUNTERS,
MLX5_IB_STAGE_CONG_DEBUGFS,
- MLX5_IB_STAGE_UAR,
MLX5_IB_STAGE_BFREG,
MLX5_IB_STAGE_PRE_IB_REG_UMR,
MLX5_IB_STAGE_WHITELIST_UID,
@@ -1473,8 +1473,8 @@ void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev);
-void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
- struct mlx5_ib_mr *mr, int flags);
+int mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
+ struct mlx5_ib_mr *mr, int flags);
int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
enum ib_uverbs_advise_mr_advice advice,
@@ -1495,8 +1495,11 @@ static inline int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev)
{
return 0;
}
-static inline void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
- struct mlx5_ib_mr *mr, int flags) {}
+static inline int mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
+ struct mlx5_ib_mr *mr, int flags)
+{
+ return -EOPNOTSUPP;
+}
static inline int
mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 5fbebafc8774..6dd813bac5b2 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -525,7 +525,7 @@ static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
ent->fill_to_high_water = false;
if (ent->pending)
queue_delayed_work(ent->dev->cache.wq, &ent->dwork,
- msecs_to_jiffies(1000));
+ secs_to_jiffies(1));
else
mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
}
@@ -576,7 +576,7 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
"add keys command failed, err %d\n",
err);
queue_delayed_work(cache->wq, &ent->dwork,
- msecs_to_jiffies(1000));
+ secs_to_jiffies(1));
}
}
} else if (ent->mkeys_queue.ci > 2 * ent->limit) {
@@ -2051,7 +2051,7 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
ent->in_use--;
if (ent->is_tmp && !ent->tmp_cleanup_scheduled) {
mod_delayed_work(ent->dev->cache.wq, &ent->dwork,
- msecs_to_jiffies(30 * 1000));
+ secs_to_jiffies(30));
ent->tmp_cleanup_scheduled = true;
}
spin_unlock_irq(&ent->mkeys_queue.lock);
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 86d8fa63bf69..eaa2f9f5f3a9 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -34,6 +34,9 @@
#include <linux/kernel.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
+#include <linux/hmm.h>
+#include <linux/hmm-dma.h>
+#include <linux/pci-p2pdma.h>
#include "mlx5_ib.h"
#include "cmd.h"
@@ -158,41 +161,50 @@ static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
}
}
-static u64 umem_dma_to_mtt(dma_addr_t umem_dma)
-{
- u64 mtt_entry = umem_dma & ODP_DMA_ADDR_MASK;
-
- if (umem_dma & ODP_READ_ALLOWED_BIT)
- mtt_entry |= MLX5_IB_MTT_READ;
- if (umem_dma & ODP_WRITE_ALLOWED_BIT)
- mtt_entry |= MLX5_IB_MTT_WRITE;
-
- return mtt_entry;
-}
-
-static void populate_mtt(__be64 *pas, size_t idx, size_t nentries,
- struct mlx5_ib_mr *mr, int flags)
+static int populate_mtt(__be64 *pas, size_t start, size_t nentries,
+ struct mlx5_ib_mr *mr, int flags)
{
struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
- dma_addr_t pa;
+ bool downgrade = flags & MLX5_IB_UPD_XLT_DOWNGRADE;
+ struct pci_p2pdma_map_state p2pdma_state = {};
+ struct ib_device *dev = odp->umem.ibdev;
size_t i;
if (flags & MLX5_IB_UPD_XLT_ZAP)
- return;
+ return 0;
for (i = 0; i < nentries; i++) {
- pa = odp->dma_list[idx + i];
- pas[i] = cpu_to_be64(umem_dma_to_mtt(pa));
+ unsigned long pfn = odp->map.pfn_list[start + i];
+ dma_addr_t dma_addr;
+
+ pfn = odp->map.pfn_list[start + i];
+ if (!(pfn & HMM_PFN_VALID))
+ /* ODP initialization */
+ continue;
+
+ dma_addr = hmm_dma_map_pfn(dev->dma_device, &odp->map,
+ start + i, &p2pdma_state);
+ if (ib_dma_mapping_error(dev, dma_addr))
+ return -EFAULT;
+
+ dma_addr |= MLX5_IB_MTT_READ;
+ if ((pfn & HMM_PFN_WRITE) && !downgrade)
+ dma_addr |= MLX5_IB_MTT_WRITE;
+
+ pas[i] = cpu_to_be64(dma_addr);
+ odp->npages++;
}
+ return 0;
}
-void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
- struct mlx5_ib_mr *mr, int flags)
+int mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
+ struct mlx5_ib_mr *mr, int flags)
{
if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
populate_klm(xlt, idx, nentries, mr, flags);
+ return 0;
} else {
- populate_mtt(xlt, idx, nentries, mr, flags);
+ return populate_mtt(xlt, idx, nentries, mr, flags);
}
}
@@ -303,8 +315,7 @@ static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
* estimate the cost of another UMR vs. the cost of bigger
* UMR.
*/
- if (umem_odp->dma_list[idx] &
- (ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT)) {
+ if (umem_odp->map.pfn_list[idx] & HMM_PFN_VALID) {
if (!in_block) {
blk_start_idx = idx;
in_block = 1;
@@ -687,7 +698,7 @@ static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
{
int page_shift, ret, np;
bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
- u64 access_mask;
+ u64 access_mask = 0;
u64 start_idx;
bool fault = !(flags & MLX5_PF_FLAGS_SNAPSHOT);
u32 xlt_flags = MLX5_IB_UPD_XLT_ATOMIC;
@@ -695,12 +706,14 @@ static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
if (flags & MLX5_PF_FLAGS_ENABLE)
xlt_flags |= MLX5_IB_UPD_XLT_ENABLE;
+ if (flags & MLX5_PF_FLAGS_DOWNGRADE)
+ xlt_flags |= MLX5_IB_UPD_XLT_DOWNGRADE;
+
page_shift = odp->page_shift;
start_idx = (user_va - ib_umem_start(odp)) >> page_shift;
- access_mask = ODP_READ_ALLOWED_BIT;
if (odp->umem.writable && !downgrade)
- access_mask |= ODP_WRITE_ALLOWED_BIT;
+ access_mask |= HMM_PFN_WRITE;
np = ib_umem_odp_map_dma_and_lock(odp, user_va, bcnt, access_mask, fault);
if (np < 0)
diff --git a/drivers/infiniband/hw/mlx5/qpc.c b/drivers/infiniband/hw/mlx5/qpc.c
index d3dcc272200a..146d03ae40bd 100644
--- a/drivers/infiniband/hw/mlx5/qpc.c
+++ b/drivers/infiniband/hw/mlx5/qpc.c
@@ -21,8 +21,10 @@ mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
spin_lock_irqsave(&table->lock, flags);
common = radix_tree_lookup(&table->tree, rsn);
- if (common)
+ if (common && !common->invalid)
refcount_inc(&common->refcount);
+ else
+ common = NULL;
spin_unlock_irqrestore(&table->lock, flags);
@@ -178,6 +180,18 @@ static int create_resource_common(struct mlx5_ib_dev *dev,
return 0;
}
+static void modify_resource_common_state(struct mlx5_ib_dev *dev,
+ struct mlx5_core_qp *qp,
+ bool invalid)
+{
+ struct mlx5_qp_table *table = &dev->qp_table;
+ unsigned long flags;
+
+ spin_lock_irqsave(&table->lock, flags);
+ qp->common.invalid = invalid;
+ spin_unlock_irqrestore(&table->lock, flags);
+}
+
static void destroy_resource_common(struct mlx5_ib_dev *dev,
struct mlx5_core_qp *qp)
{
@@ -609,8 +623,20 @@ err_destroy_rq:
int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
struct mlx5_core_qp *rq)
{
+ int ret;
+
+ /* The rq destruction can be called again in case it fails, hence we
+ * mark the common resource as invalid and only once FW destruction
+ * is completed successfully we actually destroy the resources.
+ */
+ modify_resource_common_state(dev, rq, true);
+ ret = destroy_rq_tracked(dev, rq->qpn, rq->uid);
+ if (ret) {
+ modify_resource_common_state(dev, rq, false);
+ return ret;
+ }
destroy_resource_common(dev, rq);
- return destroy_rq_tracked(dev, rq->qpn, rq->uid);
+ return 0;
}
static void destroy_sq_tracked(struct mlx5_ib_dev *dev, u32 sqn, u16 uid)
diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
index 793f3c5c4d01..5be4426a2884 100644
--- a/drivers/infiniband/hw/mlx5/umr.c
+++ b/drivers/infiniband/hw/mlx5/umr.c
@@ -840,7 +840,17 @@ int mlx5r_umr_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
size_to_map = npages * desc_size;
dma_sync_single_for_cpu(ddev, sg.addr, sg.length,
DMA_TO_DEVICE);
- mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
+ /*
+ * npages is the maximum number of pages to map, but we
+ * can't guarantee that all pages are actually mapped.
+ *
+ * For example, if page is p2p of type which is not supported
+ * for mapping, the number of pages mapped will be less than
+ * requested.
+ */
+ err = mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
+ if (err)
+ return err;
dma_sync_single_for_device(ddev, sg.addr, sg.length,
DMA_TO_DEVICE);
sg.length = ALIGN(size_to_map, MLX5_UMR_FLEX_ALIGNMENT);
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 192f83fd7c8a..dacb8ceeebe0 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -144,7 +144,7 @@ static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
buddy->max_order = max_order;
spin_lock_init(&buddy->lock);
- buddy->bits = kcalloc(buddy->max_order + 1, sizeof(long *),
+ buddy->bits = kcalloc(buddy->max_order + 1, sizeof(*buddy->bits),
GFP_KERNEL);
buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
GFP_KERNEL);
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index f948b76f984d..3fbf99757b11 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -56,7 +56,7 @@ static int usnic_uiom_dma_fault(struct iommu_domain *domain,
unsigned long iova, int flags,
void *token)
{
- usnic_err("Device %s iommu fault domain 0x%pK va 0x%lx flags 0x%x\n",
+ usnic_err("Device %s iommu fault domain 0x%p va 0x%lx flags 0x%x\n",
dev_name(dev),
domain, iova, flags);
return -ENOSYS;
diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig
index c180e7ebcfc5..1ed5b63f8afc 100644
--- a/drivers/infiniband/sw/rxe/Kconfig
+++ b/drivers/infiniband/sw/rxe/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config RDMA_RXE
tristate "Software RDMA over Ethernet (RoCE) driver"
- depends on INET && PCI && INFINIBAND
+ depends on INET && PCI && INFINIBAND && 64BIT
depends on INFINIBAND_VIRT_DMA
select NET_UDP_TUNNEL
select CRC32
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index b248c68bf9b1..3a77d6db1720 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -101,6 +101,8 @@ static void rxe_init_device_param(struct rxe_dev *rxe, struct net_device *ndev)
rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
+ rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_FLUSH;
+ rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC_WRITE;
}
}
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 0bc3fbb6554f..876702058c84 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -70,9 +70,9 @@ int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma,
void *addr, int length, enum rxe_mr_copy_dir dir);
int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset);
-int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
- u64 compare, u64 swap_add, u64 *orig_val);
-int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value);
+enum resp_states rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val);
+enum resp_states rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value);
struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
enum rxe_mr_lookup_type type);
int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);
@@ -193,13 +193,16 @@ static inline unsigned int wr_opcode_mask(int opcode, struct rxe_qp *qp)
/* rxe_odp.c */
extern const struct mmu_interval_notifier_ops rxe_mn_ops;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+#if defined CONFIG_INFINIBAND_ON_DEMAND_PAGING
int rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
u64 iova, int access_flags, struct rxe_mr *mr);
int rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
enum rxe_mr_copy_dir dir);
-int rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
- u64 compare, u64 swap_add, u64 *orig_val);
+enum resp_states rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val);
+int rxe_odp_flush_pmem_iova(struct rxe_mr *mr, u64 iova,
+ unsigned int length);
+enum resp_states rxe_odp_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline int
rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
@@ -212,9 +215,19 @@ static inline int rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
{
return -EOPNOTSUPP;
}
-static inline int
+static inline enum resp_states
rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
- u64 compare, u64 swap_add, u64 *orig_val)
+ u64 compare, u64 swap_add, u64 *orig_val)
+{
+ return RESPST_ERR_UNSUPPORTED_OPCODE;
+}
+static inline int rxe_odp_flush_pmem_iova(struct rxe_mr *mr, u64 iova,
+ unsigned int length)
+{
+ return -EOPNOTSUPP;
+}
+static inline enum resp_states rxe_odp_do_atomic_write(struct rxe_mr *mr,
+ u64 iova, u64 value)
{
return RESPST_ERR_UNSUPPORTED_OPCODE;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 432d864c3ce9..bcb97b3ea58a 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -424,7 +424,7 @@ err1:
return err;
}
-int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
+static int rxe_mr_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
{
unsigned int page_offset;
unsigned long index;
@@ -433,16 +433,6 @@ int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
int err;
u8 *va;
- /* mr must be valid even if length is zero */
- if (WARN_ON(!mr))
- return -EINVAL;
-
- if (length == 0)
- return 0;
-
- if (mr->ibmr.type == IB_MR_TYPE_DMA)
- return -EFAULT;
-
err = mr_check_range(mr, iova, length);
if (err)
return err;
@@ -454,7 +444,7 @@ int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
if (!page)
return -EFAULT;
bytes = min_t(unsigned int, length,
- mr_page_size(mr) - page_offset);
+ mr_page_size(mr) - page_offset);
va = kmap_local_page(page);
arch_wb_cache_pmem(va + page_offset, bytes);
@@ -468,11 +458,33 @@ int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
return 0;
}
+int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 start, unsigned int length)
+{
+ int err;
+
+ /* mr must be valid even if length is zero */
+ if (WARN_ON(!mr))
+ return -EINVAL;
+
+ if (length == 0)
+ return 0;
+
+ if (mr->ibmr.type == IB_MR_TYPE_DMA)
+ return -EFAULT;
+
+ if (is_odp_mr(mr))
+ err = rxe_odp_flush_pmem_iova(mr, start, length);
+ else
+ err = rxe_mr_flush_pmem_iova(mr, start, length);
+
+ return err;
+}
+
/* Guarantee atomicity of atomic operations at the machine level. */
DEFINE_SPINLOCK(atomic_ops_lock);
-int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
- u64 compare, u64 swap_add, u64 *orig_val)
+enum resp_states rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val)
{
unsigned int page_offset;
struct page *page;
@@ -524,27 +536,15 @@ int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
kunmap_local(va);
- return 0;
+ return RESPST_NONE;
}
-#if defined CONFIG_64BIT
-/* only implemented or called for 64 bit architectures */
-int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
+enum resp_states rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
{
unsigned int page_offset;
struct page *page;
u64 *va;
- /* ODP is not supported right now. WIP. */
- if (is_odp_mr(mr))
- return RESPST_ERR_UNSUPPORTED_OPCODE;
-
- /* See IBA oA19-28 */
- if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
- rxe_dbg_mr(mr, "mr not in valid state\n");
- return RESPST_ERR_RKEY_VIOLATION;
- }
-
if (mr->ibmr.type == IB_MR_TYPE_DMA) {
page_offset = iova & (PAGE_SIZE - 1);
page = ib_virt_dma_to_page(iova);
@@ -572,20 +572,12 @@ int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
}
va = kmap_local_page(page);
-
/* Do atomic write after all prior operations have completed */
smp_store_release(&va[page_offset >> 3], value);
-
kunmap_local(va);
- return 0;
-}
-#else
-int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
-{
- return RESPST_ERR_UNSUPPORTED_OPCODE;
+ return RESPST_NONE;
}
-#endif
int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
{
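
The refactored rxe_flush_pmem_iova() keeps the argument checks in one place and dispatches to the ODP or non-ODP walker; both walkers flush the range in per-page chunks. A short sketch of that chunking under assumed values (4 KiB MR pages; the numbers are not taken from the patch):

/* Sketch only: with a 4 KiB page size, iova 0x1ff0 and length 64, the first
 * iteration covers 16 bytes up to the page boundary and the second covers
 * the remaining 48 bytes at page offset 0.
 */
static void example_chunk_walk(u64 iova, unsigned int length,
                               unsigned int page_size)
{
        while (length > 0) {
                unsigned int page_offset = iova & (page_size - 1);
                unsigned int bytes = min_t(unsigned int, length,
                                           page_size - page_offset);

                /* arch_wb_cache_pmem(va + page_offset, bytes) would run here */
                length -= bytes;
                iova += bytes;
        }
}
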
diff --git a/drivers/infiniband/sw/rxe/rxe_odp.c b/drivers/infiniband/sw/rxe/rxe_odp.c
index 9f6e2bb2a269..dbc5a5600eb7 100644
--- a/drivers/infiniband/sw/rxe/rxe_odp.c
+++ b/drivers/infiniband/sw/rxe/rxe_odp.c
@@ -4,6 +4,7 @@
*/
#include <linux/hmm.h>
+#include <linux/libnvdimm.h>
#include <rdma/ib_umem_odp.h>
@@ -26,7 +27,7 @@ static bool rxe_ib_invalidate_range(struct mmu_interval_notifier *mni,
start = max_t(u64, ib_umem_start(umem_odp), range->start);
end = min_t(u64, ib_umem_end(umem_odp), range->end);
- /* update umem_odp->dma_list */
+ /* update umem_odp->map.pfn_list */
ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
mutex_unlock(&umem_odp->umem_mutex);
@@ -44,12 +45,11 @@ static int rxe_odp_do_pagefault_and_lock(struct rxe_mr *mr, u64 user_va, int bcn
{
struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
bool fault = !(flags & RXE_PAGEFAULT_SNAPSHOT);
- u64 access_mask;
+ u64 access_mask = 0;
int np;
- access_mask = ODP_READ_ALLOWED_BIT;
if (umem_odp->umem.writable && !(flags & RXE_PAGEFAULT_RDONLY))
- access_mask |= ODP_WRITE_ALLOWED_BIT;
+ access_mask |= HMM_PFN_WRITE;
/*
* ib_umem_odp_map_dma_and_lock() locks umem_mutex on success.
@@ -124,8 +124,8 @@ int rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
return err;
}
-static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp,
- u64 iova, int length, u32 perm)
+static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp, u64 iova,
+ int length)
{
bool need_fault = false;
u64 addr;
@@ -137,7 +137,7 @@ static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp,
while (addr < iova + length) {
idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
- if (!(umem_odp->dma_list[idx] & perm)) {
+ if (!(umem_odp->map.pfn_list[idx] & HMM_PFN_VALID)) {
need_fault = true;
break;
}
@@ -147,23 +147,28 @@ static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp,
return need_fault;
}
+static unsigned long rxe_odp_iova_to_index(struct ib_umem_odp *umem_odp, u64 iova)
+{
+ return (iova - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
+}
+
+static unsigned long rxe_odp_iova_to_page_offset(struct ib_umem_odp *umem_odp, u64 iova)
+{
+ return iova & (BIT(umem_odp->page_shift) - 1);
+}
+
static int rxe_odp_map_range_and_lock(struct rxe_mr *mr, u64 iova, int length, u32 flags)
{
struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
bool need_fault;
- u64 perm;
int err;
if (unlikely(length < 1))
return -EINVAL;
- perm = ODP_READ_ALLOWED_BIT;
- if (!(flags & RXE_PAGEFAULT_RDONLY))
- perm |= ODP_WRITE_ALLOWED_BIT;
-
mutex_lock(&umem_odp->umem_mutex);
- need_fault = rxe_check_pagefault(umem_odp, iova, length, perm);
+ need_fault = rxe_check_pagefault(umem_odp, iova, length);
if (need_fault) {
mutex_unlock(&umem_odp->umem_mutex);
@@ -173,7 +178,7 @@ static int rxe_odp_map_range_and_lock(struct rxe_mr *mr, u64 iova, int length, u
if (err < 0)
return err;
- need_fault = rxe_check_pagefault(umem_odp, iova, length, perm);
+ need_fault = rxe_check_pagefault(umem_odp, iova, length);
if (need_fault)
return -EFAULT;
}
@@ -190,13 +195,13 @@ static int __rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
size_t offset;
u8 *user_va;
- idx = (iova - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
- offset = iova & (BIT(umem_odp->page_shift) - 1);
+ idx = rxe_odp_iova_to_index(umem_odp, iova);
+ offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
while (length > 0) {
u8 *src, *dest;
- page = hmm_pfn_to_page(umem_odp->pfn_list[idx]);
+ page = hmm_pfn_to_page(umem_odp->map.pfn_list[idx]);
user_va = kmap_local_page(page);
if (!user_va)
return -EFAULT;
@@ -255,8 +260,9 @@ int rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
return err;
}
-static int rxe_odp_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
- u64 compare, u64 swap_add, u64 *orig_val)
+static enum resp_states rxe_odp_do_atomic_op(struct rxe_mr *mr, u64 iova,
+ int opcode, u64 compare,
+ u64 swap_add, u64 *orig_val)
{
struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
unsigned int page_offset;
@@ -277,9 +283,9 @@ static int rxe_odp_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
return RESPST_ERR_RKEY_VIOLATION;
}
- idx = (iova - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
- page_offset = iova & (BIT(umem_odp->page_shift) - 1);
- page = hmm_pfn_to_page(umem_odp->pfn_list[idx]);
+ idx = rxe_odp_iova_to_index(umem_odp, iova);
+ page_offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
+ page = hmm_pfn_to_page(umem_odp->map.pfn_list[idx]);
if (!page)
return RESPST_ERR_RKEY_VIOLATION;
@@ -304,11 +310,11 @@ static int rxe_odp_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
kunmap_local(va);
- return 0;
+ return RESPST_NONE;
}
-int rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
- u64 compare, u64 swap_add, u64 *orig_val)
+enum resp_states rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val)
{
struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
int err;
@@ -324,3 +330,91 @@ int rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
return err;
}
+
+int rxe_odp_flush_pmem_iova(struct rxe_mr *mr, u64 iova,
+ unsigned int length)
+{
+ struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
+ unsigned int page_offset;
+ unsigned long index;
+ struct page *page;
+ unsigned int bytes;
+ int err;
+ u8 *va;
+
+ err = rxe_odp_map_range_and_lock(mr, iova, length,
+ RXE_PAGEFAULT_DEFAULT);
+ if (err)
+ return err;
+
+ while (length > 0) {
+ index = rxe_odp_iova_to_index(umem_odp, iova);
+ page_offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
+
+ page = hmm_pfn_to_page(umem_odp->map.pfn_list[index]);
+ if (!page) {
+ mutex_unlock(&umem_odp->umem_mutex);
+ return -EFAULT;
+ }
+
+ bytes = min_t(unsigned int, length,
+ mr_page_size(mr) - page_offset);
+
+ va = kmap_local_page(page);
+ arch_wb_cache_pmem(va + page_offset, bytes);
+ kunmap_local(va);
+
+ length -= bytes;
+ iova += bytes;
+ page_offset = 0;
+ }
+
+ mutex_unlock(&umem_odp->umem_mutex);
+
+ return 0;
+}
+
+enum resp_states rxe_odp_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
+{
+ struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
+ unsigned int page_offset;
+ unsigned long index;
+ struct page *page;
+ int err;
+ u64 *va;
+
+ /* See IBA oA19-28 */
+ err = mr_check_range(mr, iova, sizeof(value));
+ if (unlikely(err)) {
+ rxe_dbg_mr(mr, "iova out of range\n");
+ return RESPST_ERR_RKEY_VIOLATION;
+ }
+
+ err = rxe_odp_map_range_and_lock(mr, iova, sizeof(value),
+ RXE_PAGEFAULT_DEFAULT);
+ if (err)
+ return RESPST_ERR_RKEY_VIOLATION;
+
+ page_offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
+ index = rxe_odp_iova_to_index(umem_odp, iova);
+ page = hmm_pfn_to_page(umem_odp->map.pfn_list[index]);
+ if (!page) {
+ mutex_unlock(&umem_odp->umem_mutex);
+ return RESPST_ERR_RKEY_VIOLATION;
+ }
+ /* See IBA A19.4.2 */
+ if (unlikely(page_offset & 0x7)) {
+ mutex_unlock(&umem_odp->umem_mutex);
+ rxe_dbg_mr(mr, "misaligned address\n");
+ return RESPST_ERR_MISALIGNED_ATOMIC;
+ }
+
+ va = kmap_local_page(page);
+ /* Do atomic write after all prior operations have completed */
+ smp_store_release(&va[page_offset >> 3], value);
+ kunmap_local(va);
+
+ mutex_unlock(&umem_odp->umem_mutex);
+
+ return RESPST_NONE;
+}
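
The new rxe_odp_iova_to_index() and rxe_odp_iova_to_page_offset() helpers centralize the pfn_list addressing used by the copy, atomic, flush and atomic-write paths. A worked example of the arithmetic, with assumed values that are not taken from the patch:

/* Illustrative values only: ib_umem_start() == 0x100000, page_shift == 12. */
static void example_iova_math(void)
{
        const u64 umem_start = 0x100000;
        const unsigned int page_shift = 12;
        const u64 iova = 0x103008;

        unsigned long index = (iova - umem_start) >> page_shift;   /* == 3   */
        unsigned long offset = iova & (BIT(page_shift) - 1);       /* == 0x8 */

        /* offset & 0x7 == 0, so the IBA A19.4.2 8-byte alignment check
         * in rxe_odp_do_atomic_write() passes for this iova.
         */
        (void)index;
        (void)offset;
}
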
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index 003f681e5dc0..767870568372 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -53,12 +53,9 @@ enum rxe_device_param {
| IB_DEVICE_MEM_WINDOW
| IB_DEVICE_FLUSH_GLOBAL
| IB_DEVICE_FLUSH_PERSISTENT
-#ifdef CONFIG_64BIT
| IB_DEVICE_MEM_WINDOW_TYPE_2B
| IB_DEVICE_ATOMIC_WRITE,
-#else
- | IB_DEVICE_MEM_WINDOW_TYPE_2B,
-#endif /* CONFIG_64BIT */
+
RXE_MAX_SGE = 32,
RXE_MAX_WQE_SIZE = sizeof(struct rxe_send_wqe) +
sizeof(struct ib_sge) * RXE_MAX_SGE,
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 7975fb0e2782..f2af3e0aef35 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -811,7 +811,12 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
spin_unlock_irqrestore(&qp->state_lock, flags);
qp->qp_timeout_jiffies = 0;
- if (qp_type(qp) == IB_QPT_RC) {
+ /* timer_setup() initializes .function, so a NULL .function means
+ * timer_setup() was never called and the timer was never initialized;
+ * only delete timers that were actually set up.
+ */
+ if (qp_type(qp) == IB_QPT_RC && qp->retrans_timer.function &&
+ qp->rnr_nak_timer.function) {
timer_delete_sync(&qp->retrans_timer);
timer_delete_sync(&qp->rnr_nak_timer);
}
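
The guard avoids calling timer_delete_sync() on a timer that was never initialized, for example when QP cleanup runs from an early error path. A minimal sketch of the same pattern in isolation (illustrative, not part of the patch):

/* Sketch only: tear down a timer only if timer_setup() installed a callback. */
static void example_cleanup_timer(struct timer_list *t)
{
        if (t->function)
                timer_delete_sync(t);
}
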
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 5d9174e408db..711f73e0bbb1 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -649,10 +649,6 @@ static enum resp_states process_flush(struct rxe_qp *qp,
struct rxe_mr *mr = qp->resp.mr;
struct resp_res *res = qp->resp.res;
- /* ODP is not supported right now. WIP. */
- if (is_odp_mr(mr))
- return RESPST_ERR_UNSUPPORTED_OPCODE;
-
/* oA19-14, oA19-15 */
if (res && res->replay)
return RESPST_ACKNOWLEDGE;
@@ -753,7 +749,16 @@ static enum resp_states atomic_write_reply(struct rxe_qp *qp,
value = *(u64 *)payload_addr(pkt);
iova = qp->resp.va + qp->resp.offset;
- err = rxe_mr_do_atomic_write(mr, iova, value);
+ /* See IBA oA19-28 */
+ if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
+ rxe_dbg_mr(mr, "mr not in valid state\n");
+ return RESPST_ERR_RKEY_VIOLATION;
+ }
+
+ if (is_odp_mr(mr))
+ err = rxe_odp_do_atomic_write(mr, iova, value);
+ else
+ err = rxe_mr_do_atomic_write(mr, iova, value);
if (err)
return err;
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
index 80332638d9e3..6f8f353e9583 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -85,17 +85,17 @@ static bool is_done(struct rxe_task *task)
/* do_task is a wrapper for the three tasks (requester,
* completer, responder) and calls them in a loop until
- * they return a non-zero value. It is called either
- * directly by rxe_run_task or indirectly if rxe_sched_task
- * schedules the task. They must call __reserve_if_idle to
- * move the task to busy before calling or scheduling.
- * The task can also be moved to drained or invalid
- * by calls to rxe_cleanup_task or rxe_disable_task.
- * In that case tasks which get here are not executed but
- * just flushed. The tasks are designed to look to see if
- * there is work to do and then do part of it before returning
- * here with a return value of zero until all the work
- * has been consumed then it returns a non-zero value.
+ * they return a non-zero value. It is called indirectly
+ * when rxe_sched_task schedules the task. They must
+ * call __reserve_if_idle to move the task to busy before
+ * calling or scheduling. The task can also be moved to
+ * drained or invalid by calls to rxe_cleanup_task or
+ * rxe_disable_task. In that case tasks which get here
+ * are not executed but just flushed. The tasks are
+ * designed to look to see if there is work to do and
+ * then do part of it before returning here with a return
+ * value of zero until all the work has been consumed then
+ * it returns a non-zero value.
* The number of times the task can be run is limited by
* max iterations so one task cannot hold the cpu forever.
* If the limit is hit and work remains the task is rescheduled.
@@ -234,24 +234,6 @@ void rxe_cleanup_task(struct rxe_task *task)
spin_unlock_irqrestore(&task->lock, flags);
}
-/* run the task inline if it is currently idle
- * cannot call do_task holding the lock
- */
-void rxe_run_task(struct rxe_task *task)
-{
- unsigned long flags;
- bool run;
-
- WARN_ON(rxe_read(task->qp) <= 0);
-
- spin_lock_irqsave(&task->lock, flags);
- run = __reserve_if_idle(task);
- spin_unlock_irqrestore(&task->lock, flags);
-
- if (run)
- do_task(task);
-}
-
/* schedule the task to run later as a work queue entry.
* the queue_work call can be called holding
* the lock.
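
With rxe_run_task() removed, every task kick goes through the workqueue path. A hedged sketch of what a converted call site looks like; the specific call site below is hypothetical and only illustrates the substitution implied by the removal above.

/* Hypothetical call site: formerly rxe_run_task(&qp->req.task). */
static void example_kick_requester(struct rxe_qp *qp)
{
        rxe_sched_task(&qp->req.task);
}
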
diff --git a/drivers/infiniband/sw/rxe/rxe_task.h b/drivers/infiniband/sw/rxe/rxe_task.h
index a63e258b3d66..a8c9a77b6027 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.h
+++ b/drivers/infiniband/sw/rxe/rxe_task.h
@@ -47,8 +47,6 @@ int rxe_init_task(struct rxe_task *task, struct rxe_qp *qp,
/* cleanup task */
void rxe_cleanup_task(struct rxe_task *task);
-void rxe_run_task(struct rxe_task *task);
-
void rxe_sched_task(struct rxe_task *task);
/* keep a task from scheduling */
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index d9e5a2e4c471..f5fd71717b80 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -718,7 +718,7 @@ static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len)
"MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__)
#define siw_dbg_cep(cep, fmt, ...) \
- ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%pK] %s: " fmt, \
+ ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%p] %s: " fmt, \
cep, __func__, ##__VA_ARGS__)
void siw_cq_flush(struct siw_cq *cq);
diff --git a/drivers/infiniband/sw/siw/siw_cq.c b/drivers/infiniband/sw/siw/siw_cq.c
index f3c2226aff94..25b3c741b66b 100644
--- a/drivers/infiniband/sw/siw/siw_cq.c
+++ b/drivers/infiniband/sw/siw/siw_cq.c
@@ -72,7 +72,7 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)
wc->opcode = map_wc_opcode[cqe->opcode];
wc->status = map_cqe_status[cqe->status].ib;
siw_dbg_cq(cq,
- "idx %u, type %d, flags %2x, id 0x%pK\n",
+ "idx %u, type %d, flags %2x, id 0x%p\n",
cq->cq_get % cq->num_cqe, cqe->opcode,
cqe->flags, (void *)(uintptr_t)cqe->id);
} else {
diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
index dcb963607c8b..d5ddeb17bd22 100644
--- a/drivers/infiniband/sw/siw/siw_mem.c
+++ b/drivers/infiniband/sw/siw/siw_mem.c
@@ -18,30 +18,6 @@
#define SIW_STAG_MAX_INDEX 0x00ffffff
/*
- * The code avoids special Stag of zero and tries to randomize
- * STag values between 1 and SIW_STAG_MAX_INDEX.
- */
-int siw_mem_add(struct siw_device *sdev, struct siw_mem *m)
-{
- struct xa_limit limit = XA_LIMIT(1, SIW_STAG_MAX_INDEX);
- u32 id, next;
-
- get_random_bytes(&next, 4);
- next &= SIW_STAG_MAX_INDEX;
-
- if (xa_alloc_cyclic(&sdev->mem_xa, &id, m, limit, &next,
- GFP_KERNEL) < 0)
- return -ENOMEM;
-
- /* Set the STag index part */
- m->stag = id << 8;
-
- siw_dbg_mem(m, "new MEM object\n");
-
- return 0;
-}
-
-/*
* siw_mem_id2obj()
*
* resolves memory from stag given by id. might be called from:
@@ -181,10 +157,10 @@ int siw_check_mem(struct ib_pd *pd, struct siw_mem *mem, u64 addr,
*/
if (addr < mem->va || addr + len > mem->va + mem->len) {
siw_dbg_pd(pd, "MEM interval len %d\n", len);
- siw_dbg_pd(pd, "[0x%pK, 0x%pK] out of bounds\n",
+ siw_dbg_pd(pd, "[0x%p, 0x%p] out of bounds\n",
(void *)(uintptr_t)addr,
(void *)(uintptr_t)(addr + len));
- siw_dbg_pd(pd, "[0x%pK, 0x%pK] STag=0x%08x\n",
+ siw_dbg_pd(pd, "[0x%p, 0x%p] STag=0x%08x\n",
(void *)(uintptr_t)mem->va,
(void *)(uintptr_t)(mem->va + mem->len),
mem->stag);
diff --git a/drivers/infiniband/sw/siw/siw_mem.h b/drivers/infiniband/sw/siw/siw_mem.h
index e74cfcd6dbc1..8e769d30e2ac 100644
--- a/drivers/infiniband/sw/siw/siw_mem.h
+++ b/drivers/infiniband/sw/siw/siw_mem.h
@@ -12,7 +12,6 @@ void siw_umem_release(struct siw_umem *umem);
struct siw_pbl *siw_pbl_alloc(u32 num_buf);
dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx);
struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index);
-int siw_mem_add(struct siw_device *sdev, struct siw_mem *m);
int siw_invalidate_stag(struct ib_pd *pd, u32 stag);
int siw_check_mem(struct ib_pd *pd, struct siw_mem *mem, u64 addr,
enum ib_access_flags perms, int len);
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index 32554eba1eac..a10820e33887 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -38,7 +38,7 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
p = siw_get_upage(umem, dest_addr);
if (unlikely(!p)) {
- pr_warn("siw: %s: [QP %u]: bogus addr: %pK, %pK\n",
+ pr_warn("siw: %s: [QP %u]: bogus addr: %p, %p\n",
__func__, qp_id(rx_qp(srx)),
(void *)(uintptr_t)dest_addr,
(void *)(uintptr_t)umem->fp_addr);
@@ -51,7 +51,7 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
pg_off = dest_addr & ~PAGE_MASK;
bytes = min(len, (int)PAGE_SIZE - pg_off);
- siw_dbg_qp(rx_qp(srx), "page %pK, bytes=%u\n", p, bytes);
+ siw_dbg_qp(rx_qp(srx), "page %p, bytes=%u\n", p, bytes);
dest = kmap_atomic(p);
rv = skb_copy_bits(srx->skb, srx->skb_offset, dest + pg_off,
@@ -105,11 +105,11 @@ static int siw_rx_kva(struct siw_rx_stream *srx, void *kva, int len)
{
int rv;
- siw_dbg_qp(rx_qp(srx), "kva: 0x%pK, len: %u\n", kva, len);
+ siw_dbg_qp(rx_qp(srx), "kva: 0x%p, len: %u\n", kva, len);
rv = skb_copy_bits(srx->skb, srx->skb_offset, kva, len);
if (unlikely(rv)) {
- pr_warn("siw: [QP %u]: %s, len %d, kva 0x%pK, rv %d\n",
+ pr_warn("siw: [QP %u]: %s, len %d, kva 0x%p, rv %d\n",
qp_id(rx_qp(srx)), __func__, len, kva, rv);
return rv;
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index fd7b266a221b..2b2a7b8e93b0 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -936,7 +936,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
rv = -EINVAL;
break;
}
- siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%pK\n",
+ siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%p\n",
sqe->opcode, sqe->flags,
(void *)(uintptr_t)sqe->id);
@@ -1102,7 +1102,7 @@ int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
siw_dbg_qp(qp, "error %d\n", rv);
*bad_wr = wr;
}
- return rv > 0 ? 0 : rv;
+ return rv;
}
int siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
@@ -1332,7 +1332,7 @@ struct ib_mr *siw_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
struct siw_device *sdev = to_siw_dev(pd->device);
int rv;
- siw_dbg_pd(pd, "start: 0x%pK, va: 0x%pK, len: %llu\n",
+ siw_dbg_pd(pd, "start: 0x%p, va: 0x%p, len: %llu\n",
(void *)(uintptr_t)start, (void *)(uintptr_t)rnic_va,
(unsigned long long)len);
@@ -1525,7 +1525,7 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
mem->len = base_mr->length;
mem->va = base_mr->iova;
siw_dbg_mem(mem,
- "%llu bytes, start 0x%pK, %u SLE to %u entries\n",
+ "%llu bytes, start 0x%p, %u SLE to %u entries\n",
mem->len, (void *)(uintptr_t)mem->va, num_sle,
pbl->num_buf);
}
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index cd750f512dee..0a33d995d15d 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -192,6 +192,7 @@ config MSM_IOMMU
If unsure, say N here.
source "drivers/iommu/amd/Kconfig"
+source "drivers/iommu/arm/Kconfig"
source "drivers/iommu/intel/Kconfig"
source "drivers/iommu/iommufd/Kconfig"
source "drivers/iommu/riscv/Kconfig"
@@ -199,7 +200,6 @@ source "drivers/iommu/riscv/Kconfig"
config IRQ_REMAP
bool "Support for Interrupt Remapping"
depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI
- select DMAR_TABLE if INTEL_IOMMU
help
Supports Interrupt remapping for IO-APIC and MSI devices.
To use x2apic mode in the CPU's which support x2APIC enhancements or
@@ -314,150 +314,6 @@ config APPLE_DART
Say Y here if you are using an Apple SoC.
-# ARM IOMMU support
-config ARM_SMMU
- tristate "ARM Ltd. System MMU (SMMU) Support"
- depends on ARM64 || ARM || COMPILE_TEST
- depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
- select IOMMU_API
- select IOMMU_IO_PGTABLE_LPAE
- select ARM_DMA_USE_IOMMU if ARM
- help
- Support for implementations of the ARM System MMU architecture
- versions 1 and 2.
-
- Say Y here if your SoC includes an IOMMU device implementing
- the ARM SMMU architecture.
-
-config ARM_SMMU_LEGACY_DT_BINDINGS
- bool "Support the legacy \"mmu-masters\" devicetree bindings"
- depends on ARM_SMMU=y && OF
- help
- Support for the badly designed and deprecated "mmu-masters"
- devicetree bindings. This allows some DMA masters to attach
- to the SMMU but does not provide any support via the DMA API.
- If you're lucky, you might be able to get VFIO up and running.
-
- If you say Y here then you'll make me very sad. Instead, say N
- and move your firmware to the utopian future that was 2016.
-
-config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT
- bool "Default to disabling bypass on ARM SMMU v1 and v2"
- depends on ARM_SMMU
- default y
- help
- Say Y here to (by default) disable bypass streams such that
- incoming transactions from devices that are not attached to
- an iommu domain will report an abort back to the device and
- will not be allowed to pass through the SMMU.
-
- Any old kernels that existed before this KConfig was
- introduced would default to _allowing_ bypass (AKA the
- equivalent of NO for this config). However the default for
- this option is YES because the old behavior is insecure.
-
- There are few reasons to allow unmatched stream bypass, and
- even fewer good ones. If saying YES here breaks your board
- you should work on fixing your board. This KConfig option
- is expected to be removed in the future and we'll simply
- hardcode the bypass disable in the code.
-
- NOTE: the kernel command line parameter
- 'arm-smmu.disable_bypass' will continue to override this
- config.
-
-config ARM_SMMU_MMU_500_CPRE_ERRATA
- bool "Enable errata workaround for CPRE in SMMU reset path"
- depends on ARM_SMMU
- default y
- help
- Say Y here (by default) to apply workaround to disable
- MMU-500's next-page prefetcher for sake of 4 known errata.
-
- Say N here only when it is sure that any errata related to
- prefetch enablement are not applicable on the platform.
- Refer silicon-errata.rst for info on errata IDs.
-
-config ARM_SMMU_QCOM
- def_tristate y
- depends on ARM_SMMU && ARCH_QCOM
- select QCOM_SCM
- help
- When running on a Qualcomm platform that has the custom variant
- of the ARM SMMU, this needs to be built into the SMMU driver.
-
-config ARM_SMMU_QCOM_DEBUG
- bool "ARM SMMU QCOM implementation defined debug support"
- depends on ARM_SMMU_QCOM=y
- help
- Support for implementation specific debug features in ARM SMMU
- hardware found in QTI platforms. This include support for
- the Translation Buffer Units (TBU) that can be used to obtain
- additional information when debugging memory management issues
- like context faults.
-
- Say Y here to enable debug for issues such as context faults
- or TLB sync timeouts which requires implementation defined
- register dumps.
-
-config ARM_SMMU_V3
- tristate "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
- depends on ARM64
- select IOMMU_API
- select IOMMU_IO_PGTABLE_LPAE
- select GENERIC_MSI_IRQ
- select IOMMUFD_DRIVER if IOMMUFD
- help
- Support for implementations of the ARM System MMU architecture
- version 3 providing translation support to a PCIe root complex.
-
- Say Y here if your system includes an IOMMU device implementing
- the ARM SMMUv3 architecture.
-
-if ARM_SMMU_V3
-config ARM_SMMU_V3_SVA
- bool "Shared Virtual Addressing support for the ARM SMMUv3"
- select IOMMU_SVA
- select IOMMU_IOPF
- select MMU_NOTIFIER
- help
- Support for sharing process address spaces with devices using the
- SMMUv3.
-
- Say Y here if your system supports SVA extensions such as PCIe PASID
- and PRI.
-
-config ARM_SMMU_V3_IOMMUFD
- bool "Enable IOMMUFD features for ARM SMMUv3 (EXPERIMENTAL)"
- depends on IOMMUFD
- help
- Support for IOMMUFD features intended to support virtual machines
- with accelerated virtual IOMMUs.
-
- Say Y here if you are doing development and testing on this feature.
-
-config ARM_SMMU_V3_KUNIT_TEST
- tristate "KUnit tests for arm-smmu-v3 driver" if !KUNIT_ALL_TESTS
- depends on KUNIT
- depends on ARM_SMMU_V3_SVA
- default KUNIT_ALL_TESTS
- help
- Enable this option to unit-test arm-smmu-v3 driver functions.
-
- If unsure, say N.
-
-config TEGRA241_CMDQV
- bool "NVIDIA Tegra241 CMDQ-V extension support for ARM SMMUv3"
- depends on ACPI
- help
- Support for NVIDIA CMDQ-Virtualization extension for ARM SMMUv3. The
- CMDQ-V extension is similar to v3.3 ECMDQ for multi command queues
- support, except with virtualization capabilities.
-
- Say Y here if your system is NVIDIA Tegra241 (Grace) or it has the same
- CMDQ-V extension.
-endif
-
config S390_IOMMU
def_bool y if S390 && PCI
depends on S390 && PCI
@@ -494,18 +350,6 @@ config MTK_IOMMU_V1
if unsure, say N here.
-config QCOM_IOMMU
- # Note: iommu drivers cannot (yet?) be built as modules
- bool "Qualcomm IOMMU Support"
- depends on ARCH_QCOM || COMPILE_TEST
- depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
- select QCOM_SCM
- select IOMMU_API
- select IOMMU_IO_PGTABLE_LPAE
- select ARM_DMA_USE_IOMMU
- help
- Support for IOMMU on certain Qualcomm SoCs.
-
config HYPERV_IOMMU
bool "Hyper-V IRQ Handling"
depends on HYPERV && X86
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 5e5a83c6c2aa..355294fa9033 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -1,6 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
-obj-y += amd/ intel/ arm/ iommufd/ riscv/
+obj-y += arm/ iommufd/
+obj-$(CONFIG_AMD_IOMMU) += amd/
+obj-$(CONFIG_INTEL_IOMMU) += intel/
+obj-$(CONFIG_RISCV_IOMMU) += riscv/
obj-$(CONFIG_IOMMU_API) += iommu.o
+obj-$(CONFIG_IOMMU_SUPPORT) += iommu-pages.o
obj-$(CONFIG_IOMMU_API) += iommu-traces.o
obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o
obj-$(CONFIG_IOMMU_DEBUGFS) += iommu-debugfs.o
diff --git a/drivers/iommu/amd/Makefile b/drivers/iommu/amd/Makefile
index 9de33b2d42f5..59c04a67f398 100644
--- a/drivers/iommu/amd/Makefile
+++ b/drivers/iommu/amd/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o io_pgtable.o io_pgtable_v2.o ppr.o pasid.o
+obj-y += iommu.o init.o quirks.o io_pgtable.o io_pgtable_v2.o ppr.o pasid.o
obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += debugfs.o
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 220c598b7e14..29a8864381c3 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -147,6 +147,8 @@ static inline int get_pci_sbdf_id(struct pci_dev *pdev)
return PCI_SEG_DEVID_TO_SBDF(seg, devid);
}
+bool amd_iommu_ht_range_ignore(void);
+
/*
* This must be called after device probe completes. During probe
* use rlookup_amd_iommu() get the iommu.
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 5089b58e528a..ccbab3a4811a 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -29,8 +29,6 @@
* some size calculation constants
*/
#define DEV_TABLE_ENTRY_SIZE 32
-#define ALIAS_TABLE_ENTRY_SIZE 2
-#define RLOOKUP_TABLE_ENTRY_SIZE (sizeof(void *))
/* Capability offsets used by the driver */
#define MMIO_CAP_HDR_OFFSET 0x00
@@ -111,6 +109,7 @@
#define FEATURE_SNPAVICSUP GENMASK_ULL(7, 5)
#define FEATURE_SNPAVICSUP_GAM(x) \
(FIELD_GET(FEATURE_SNPAVICSUP, x) == 0x1)
+#define FEATURE_HT_RANGE_IGNORE BIT_ULL(11)
#define FEATURE_NUM_INT_REMAP_SUP GENMASK_ULL(9, 8)
#define FEATURE_NUM_INT_REMAP_SUP_2K(x) \
@@ -316,6 +315,7 @@
#define DTE_IRQ_REMAP_INTCTL (2ULL << 60)
#define DTE_IRQ_REMAP_ENABLE 1ULL
+#define DTE_INTTAB_ALIGNMENT 128
#define DTE_INTTABLEN_MASK (0xfULL << 1)
#define DTE_INTTABLEN_VALUE_512 9ULL
#define DTE_INTTABLEN_512 (DTE_INTTABLEN_VALUE_512 << 1)
@@ -616,12 +616,6 @@ struct amd_iommu_pci_seg {
/* Size of the device table */
u32 dev_table_size;
- /* Size of the alias table */
- u32 alias_table_size;
-
- /* Size of the rlookup table */
- u32 rlookup_table_size;
-
/*
* device table virtual address
*
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 14aa0d77df26..c06b62f87b9b 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -243,17 +243,14 @@ static void init_translation_status(struct amd_iommu *iommu)
iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}
-static inline unsigned long tbl_size(int entry_size, int last_bdf)
+int amd_iommu_get_num_iommus(void)
{
- unsigned shift = PAGE_SHIFT +
- get_order((last_bdf + 1) * entry_size);
-
- return 1UL << shift;
+ return amd_iommus_present;
}
-int amd_iommu_get_num_iommus(void)
+bool amd_iommu_ht_range_ignore(void)
{
- return amd_iommus_present;
+ return check_feature2(FEATURE_HT_RANGE_IGNORE);
}
/*
@@ -634,8 +631,8 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table, u16 pci_
/* Allocate per PCI segment device table */
static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
{
- pci_seg->dev_table = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32,
- get_order(pci_seg->dev_table_size));
+ pci_seg->dev_table = iommu_alloc_pages_sz(GFP_KERNEL | GFP_DMA32,
+ pci_seg->dev_table_size);
if (!pci_seg->dev_table)
return -ENOMEM;
@@ -644,16 +641,16 @@ static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg)
{
- iommu_free_pages(pci_seg->dev_table,
- get_order(pci_seg->dev_table_size));
+ iommu_free_pages(pci_seg->dev_table);
pci_seg->dev_table = NULL;
}
/* Allocate per PCI segment IOMMU rlookup table. */
static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
{
- pci_seg->rlookup_table = iommu_alloc_pages(GFP_KERNEL,
- get_order(pci_seg->rlookup_table_size));
+ pci_seg->rlookup_table = kvcalloc(pci_seg->last_bdf + 1,
+ sizeof(*pci_seg->rlookup_table),
+ GFP_KERNEL);
if (pci_seg->rlookup_table == NULL)
return -ENOMEM;
@@ -662,17 +659,15 @@ static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
static inline void free_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
{
- iommu_free_pages(pci_seg->rlookup_table,
- get_order(pci_seg->rlookup_table_size));
+ kvfree(pci_seg->rlookup_table);
pci_seg->rlookup_table = NULL;
}
static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
{
- pci_seg->irq_lookup_table = iommu_alloc_pages(GFP_KERNEL,
- get_order(pci_seg->rlookup_table_size));
- kmemleak_alloc(pci_seg->irq_lookup_table,
- pci_seg->rlookup_table_size, 1, GFP_KERNEL);
+ pci_seg->irq_lookup_table = kvcalloc(pci_seg->last_bdf + 1,
+ sizeof(*pci_seg->irq_lookup_table),
+ GFP_KERNEL);
if (pci_seg->irq_lookup_table == NULL)
return -ENOMEM;
@@ -681,9 +676,7 @@ static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_se
static inline void free_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
{
- kmemleak_free(pci_seg->irq_lookup_table);
- iommu_free_pages(pci_seg->irq_lookup_table,
- get_order(pci_seg->rlookup_table_size));
+ kvfree(pci_seg->irq_lookup_table);
pci_seg->irq_lookup_table = NULL;
}
@@ -691,8 +684,9 @@ static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
{
int i;
- pci_seg->alias_table = iommu_alloc_pages(GFP_KERNEL,
- get_order(pci_seg->alias_table_size));
+ pci_seg->alias_table = kvmalloc_array(pci_seg->last_bdf + 1,
+ sizeof(*pci_seg->alias_table),
+ GFP_KERNEL);
if (!pci_seg->alias_table)
return -ENOMEM;
@@ -707,8 +701,7 @@ static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
{
- iommu_free_pages(pci_seg->alias_table,
- get_order(pci_seg->alias_table_size));
+ kvfree(pci_seg->alias_table);
pci_seg->alias_table = NULL;
}
@@ -719,8 +712,7 @@ static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
*/
static int __init alloc_command_buffer(struct amd_iommu *iommu)
{
- iommu->cmd_buf = iommu_alloc_pages(GFP_KERNEL,
- get_order(CMD_BUFFER_SIZE));
+ iommu->cmd_buf = iommu_alloc_pages_sz(GFP_KERNEL, CMD_BUFFER_SIZE);
return iommu->cmd_buf ? 0 : -ENOMEM;
}
@@ -817,20 +809,22 @@ static void iommu_disable_command_buffer(struct amd_iommu *iommu)
static void __init free_command_buffer(struct amd_iommu *iommu)
{
- iommu_free_pages(iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
+ iommu_free_pages(iommu->cmd_buf);
}
void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, gfp_t gfp,
size_t size)
{
- int order = get_order(size);
- void *buf = iommu_alloc_pages(gfp, order);
+ void *buf;
- if (buf &&
- check_feature(FEATURE_SNP) &&
- set_memory_4k((unsigned long)buf, (1 << order))) {
- iommu_free_pages(buf, order);
- buf = NULL;
+ size = PAGE_ALIGN(size);
+ buf = iommu_alloc_pages_sz(gfp, size);
+ if (!buf)
+ return NULL;
+ if (check_feature(FEATURE_SNP) &&
+ set_memory_4k((unsigned long)buf, size / PAGE_SIZE)) {
+ iommu_free_pages(buf);
+ return NULL;
}
return buf;
@@ -873,14 +867,14 @@ static void iommu_disable_event_buffer(struct amd_iommu *iommu)
static void __init free_event_buffer(struct amd_iommu *iommu)
{
- iommu_free_pages(iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
+ iommu_free_pages(iommu->evt_buf);
}
static void free_ga_log(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
- iommu_free_pages(iommu->ga_log, get_order(GA_LOG_SIZE));
- iommu_free_pages(iommu->ga_log_tail, get_order(8));
+ iommu_free_pages(iommu->ga_log);
+ iommu_free_pages(iommu->ga_log_tail);
#endif
}
@@ -925,11 +919,11 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
return 0;
- iommu->ga_log = iommu_alloc_pages(GFP_KERNEL, get_order(GA_LOG_SIZE));
+ iommu->ga_log = iommu_alloc_pages_sz(GFP_KERNEL, GA_LOG_SIZE);
if (!iommu->ga_log)
goto err_out;
- iommu->ga_log_tail = iommu_alloc_pages(GFP_KERNEL, get_order(8));
+ iommu->ga_log_tail = iommu_alloc_pages_sz(GFP_KERNEL, 8);
if (!iommu->ga_log_tail)
goto err_out;
@@ -950,7 +944,7 @@ static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
static void __init free_cwwb_sem(struct amd_iommu *iommu)
{
if (iommu->cmd_sem)
- iommu_free_page((void *)iommu->cmd_sem);
+ iommu_free_pages((void *)iommu->cmd_sem);
}
static void iommu_enable_xt(struct amd_iommu *iommu)
@@ -1024,8 +1018,8 @@ static bool __copy_device_table(struct amd_iommu *iommu)
if (!old_devtb)
return false;
- pci_seg->old_dev_tbl_cpy = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32,
- get_order(pci_seg->dev_table_size));
+ pci_seg->old_dev_tbl_cpy = iommu_alloc_pages_sz(
+ GFP_KERNEL | GFP_DMA32, pci_seg->dev_table_size);
if (pci_seg->old_dev_tbl_cpy == NULL) {
pr_err("Failed to allocate memory for copying old device table!\n");
memunmap(old_devtb);
@@ -1599,9 +1593,9 @@ static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id,
pci_seg->last_bdf = last_bdf;
DUMP_printk("PCI segment : 0x%0x, last bdf : 0x%04x\n", id, last_bdf);
- pci_seg->dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE, last_bdf);
- pci_seg->alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE, last_bdf);
- pci_seg->rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE, last_bdf);
+ pci_seg->dev_table_size =
+ max(roundup_pow_of_two((last_bdf + 1) * DEV_TABLE_ENTRY_SIZE),
+ SZ_4K);
pci_seg->id = id;
init_llist_head(&pci_seg->dev_data_list);
@@ -2789,8 +2783,7 @@ static void early_enable_iommus(void)
for_each_pci_segment(pci_seg) {
if (pci_seg->old_dev_tbl_cpy != NULL) {
- iommu_free_pages(pci_seg->old_dev_tbl_cpy,
- get_order(pci_seg->dev_table_size));
+ iommu_free_pages(pci_seg->old_dev_tbl_cpy);
pci_seg->old_dev_tbl_cpy = NULL;
}
}
@@ -2803,8 +2796,7 @@ static void early_enable_iommus(void)
pr_info("Copied DEV table from previous kernel.\n");
for_each_pci_segment(pci_seg) {
- iommu_free_pages(pci_seg->dev_table,
- get_order(pci_seg->dev_table_size));
+ iommu_free_pages(pci_seg->dev_table);
pci_seg->dev_table = pci_seg->old_dev_tbl_cpy;
}
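
The device table is now allocated by byte size through iommu_alloc_pages_sz(), and the rlookup, irq-lookup and alias tables move to kvcalloc()/kvmalloc_array() sized by entry count, so the old page-order based tbl_size() helper goes away. A worked sketch of the new dev_table_size computation under an assumed last_bdf (the value is not from the patch; DEV_TABLE_ENTRY_SIZE is 32 as defined in amd_iommu_types.h):

/* Assumed example: last_bdf = 0xff gives 256 * 32 = 8192 bytes, which
 * roundup_pow_of_two() leaves at 8192 and max(..., SZ_4K) keeps, so
 * iommu_alloc_pages_sz() is asked for exactly 8 KiB.
 */
static u32 example_dev_table_size(u16 last_bdf)
{
        return max(roundup_pow_of_two((last_bdf + 1) * DEV_TABLE_ENTRY_SIZE),
                   SZ_4K);
}
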
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 26cf562dde11..4d308c071134 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -47,14 +47,7 @@ static u64 *first_pte_l7(u64 *pte, unsigned long *page_size,
return fpte;
}
-static void free_pt_page(u64 *pt, struct list_head *freelist)
-{
- struct page *p = virt_to_page(pt);
-
- list_add_tail(&p->lru, freelist);
-}
-
-static void free_pt_lvl(u64 *pt, struct list_head *freelist, int lvl)
+static void free_pt_lvl(u64 *pt, struct iommu_pages_list *freelist, int lvl)
{
u64 *p;
int i;
@@ -77,20 +70,20 @@ static void free_pt_lvl(u64 *pt, struct list_head *freelist, int lvl)
if (lvl > 2)
free_pt_lvl(p, freelist, lvl - 1);
else
- free_pt_page(p, freelist);
+ iommu_pages_list_add(freelist, p);
}
- free_pt_page(pt, freelist);
+ iommu_pages_list_add(freelist, pt);
}
-static void free_sub_pt(u64 *root, int mode, struct list_head *freelist)
+static void free_sub_pt(u64 *root, int mode, struct iommu_pages_list *freelist)
{
switch (mode) {
case PAGE_MODE_NONE:
case PAGE_MODE_7_LEVEL:
break;
case PAGE_MODE_1_LEVEL:
- free_pt_page(root, freelist);
+ iommu_pages_list_add(freelist, root);
break;
case PAGE_MODE_2_LEVEL:
case PAGE_MODE_3_LEVEL:
@@ -121,7 +114,7 @@ static bool increase_address_space(struct amd_io_pgtable *pgtable,
bool ret = true;
u64 *pte;
- pte = iommu_alloc_page_node(cfg->amd.nid, gfp);
+ pte = iommu_alloc_pages_node_sz(cfg->amd.nid, gfp, SZ_4K);
if (!pte)
return false;
@@ -146,7 +139,7 @@ static bool increase_address_space(struct amd_io_pgtable *pgtable,
out:
spin_unlock_irqrestore(&domain->lock, flags);
- iommu_free_page(pte);
+ iommu_free_pages(pte);
return ret;
}
@@ -213,7 +206,8 @@ static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
if (!IOMMU_PTE_PRESENT(__pte) ||
pte_level == PAGE_MODE_NONE) {
- page = iommu_alloc_page_node(cfg->amd.nid, gfp);
+ page = iommu_alloc_pages_node_sz(cfg->amd.nid, gfp,
+ SZ_4K);
if (!page)
return NULL;
@@ -222,7 +216,7 @@ static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
/* pte could have been changed somewhere. */
if (!try_cmpxchg64(pte, &__pte, __npte))
- iommu_free_page(page);
+ iommu_free_pages(page);
else if (IOMMU_PTE_PRESENT(__pte))
*updated = true;
@@ -299,7 +293,8 @@ static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
return pte;
}
-static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
+static void free_clear_pte(u64 *pte, u64 pteval,
+ struct iommu_pages_list *freelist)
{
u64 *pt;
int mode;
@@ -328,7 +323,7 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
int prot, gfp_t gfp, size_t *mapped)
{
struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
- LIST_HEAD(freelist);
+ struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist);
bool updated = false;
u64 __pte, *pte;
int ret, i, count;
@@ -353,7 +348,7 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
for (i = 0; i < count; ++i)
free_clear_pte(&pte[i], pte[i], &freelist);
- if (!list_empty(&freelist))
+ if (!iommu_pages_list_empty(&freelist))
updated = true;
if (count > 1) {
@@ -524,7 +519,7 @@ static int iommu_v1_read_and_clear_dirty(struct io_pgtable_ops *ops,
static void v1_free_pgtable(struct io_pgtable *iop)
{
struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, pgtbl);
- LIST_HEAD(freelist);
+ struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist);
if (pgtable->mode == PAGE_MODE_NONE)
return;
@@ -541,7 +536,8 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
{
struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
- pgtable->root = iommu_alloc_page_node(cfg->amd.nid, GFP_KERNEL);
+ pgtable->root =
+ iommu_alloc_pages_node_sz(cfg->amd.nid, GFP_KERNEL, SZ_4K);
if (!pgtable->root)
return NULL;
pgtable->mode = PAGE_MODE_3_LEVEL;
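
The v1 teardown path now collects page-table pages on a struct iommu_pages_list instead of chaining struct page::lru onto a list_head. A minimal sketch of the collect pattern, using only the helpers visible in this hunk; the final bulk-free call is deliberately elided because its name does not appear here.

/* Illustrative sketch, not part of the patch. */
static void example_collect_tables(u64 *root_table, u64 *leaf_table)
{
        struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist);

        iommu_pages_list_add(&freelist, leaf_table);
        iommu_pages_list_add(&freelist, root_table);

        if (!iommu_pages_list_empty(&freelist)) {
                /* hand the list to the IOMMU core for freeing after the
                 * relevant TLB invalidation (helper not shown in this hunk)
                 */
        }
}
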
diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c
index a56a27396305..b47941353ccb 100644
--- a/drivers/iommu/amd/io_pgtable_v2.c
+++ b/drivers/iommu/amd/io_pgtable_v2.c
@@ -121,10 +121,10 @@ static void free_pgtable(u64 *pt, int level)
if (level > 2)
free_pgtable(p, level - 1);
else
- iommu_free_page(p);
+ iommu_free_pages(p);
}
- iommu_free_page(pt);
+ iommu_free_pages(pt);
}
/* Allocate page table */
@@ -152,14 +152,14 @@ static u64 *v2_alloc_pte(int nid, u64 *pgd, unsigned long iova,
}
if (!IOMMU_PTE_PRESENT(__pte)) {
- page = iommu_alloc_page_node(nid, gfp);
+ page = iommu_alloc_pages_node_sz(nid, gfp, SZ_4K);
if (!page)
return NULL;
__npte = set_pgtable_attr(page);
/* pte could have been changed somewhere. */
if (!try_cmpxchg64(pte, &__pte, __npte))
- iommu_free_page(page);
+ iommu_free_pages(page);
else if (IOMMU_PTE_PRESENT(__pte))
*updated = true;
@@ -181,7 +181,7 @@ static u64 *v2_alloc_pte(int nid, u64 *pgd, unsigned long iova,
if (pg_size == IOMMU_PAGE_SIZE_1G)
free_pgtable(__pte, end_level - 1);
else if (pg_size == IOMMU_PAGE_SIZE_2M)
- iommu_free_page(__pte);
+ iommu_free_pages(__pte);
}
return pte;
@@ -346,7 +346,7 @@ static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
int ias = IOMMU_IN_ADDR_BIT_SIZE;
- pgtable->pgd = iommu_alloc_page_node(cfg->amd.nid, GFP_KERNEL);
+ pgtable->pgd = iommu_alloc_pages_node_sz(cfg->amd.nid, GFP_KERNEL, SZ_4K);
if (!pgtable->pgd)
return NULL;
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index f34209b08b4c..3117d99cf83d 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -241,7 +241,9 @@ static inline int get_acpihid_device_id(struct device *dev,
struct acpihid_map_entry **entry)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
- struct acpihid_map_entry *p;
+ struct acpihid_map_entry *p, *p1 = NULL;
+ int hid_count = 0;
+ bool fw_bug;
if (!adev)
return -ENODEV;
@@ -249,12 +251,33 @@ static inline int get_acpihid_device_id(struct device *dev,
list_for_each_entry(p, &acpihid_map, list) {
if (acpi_dev_hid_uid_match(adev, p->hid,
p->uid[0] ? p->uid : NULL)) {
- if (entry)
- *entry = p;
- return p->devid;
+ p1 = p;
+ fw_bug = false;
+ hid_count = 1;
+ break;
+ }
+
+ /*
+ * Count HID matches w/o UID, raise FW_BUG but allow exactly one match
+ */
+ if (acpi_dev_hid_match(adev, p->hid)) {
+ p1 = p;
+ hid_count++;
+ fw_bug = true;
}
}
- return -EINVAL;
+
+ if (!p1)
+ return -EINVAL;
+ if (fw_bug)
+ dev_err_once(dev, FW_BUG "No ACPI device matched UID, but %d device%s matched HID.\n",
+ hid_count, hid_count > 1 ? "s" : "");
+ if (hid_count > 1)
+ return -EINVAL;
+ if (entry)
+ *entry = p1;
+
+ return p1->devid;
}
static inline int get_device_sbdf_id(struct device *dev)
@@ -982,6 +1005,14 @@ int amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
{
iommu_ga_log_notifier = notifier;
+ /*
+ * Ensure all in-flight IRQ handlers run to completion before returning
+ * to the caller, e.g. to ensure module code isn't unloaded while it's
+ * being executed in the IRQ handler.
+ */
+ if (!notifier)
+ synchronize_rcu();
+
return 0;
}
EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier);
@@ -1812,7 +1843,7 @@ static void free_gcr3_tbl_level1(u64 *tbl)
ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
- iommu_free_page(ptr);
+ iommu_free_pages(ptr);
}
}
@@ -1845,7 +1876,7 @@ static void free_gcr3_table(struct gcr3_tbl_info *gcr3_info)
/* Free per device domain ID */
pdom_id_free(gcr3_info->domid);
- iommu_free_page(gcr3_info->gcr3_tbl);
+ iommu_free_pages(gcr3_info->gcr3_tbl);
gcr3_info->gcr3_tbl = NULL;
}
@@ -1884,7 +1915,7 @@ static int setup_gcr3_table(struct gcr3_tbl_info *gcr3_info,
return -ENOSPC;
gcr3_info->domid = domid;
- gcr3_info->gcr3_tbl = iommu_alloc_page_node(nid, GFP_ATOMIC);
+ gcr3_info->gcr3_tbl = iommu_alloc_pages_node_sz(nid, GFP_ATOMIC, SZ_4K);
if (gcr3_info->gcr3_tbl == NULL) {
pdom_id_free(domid);
return -ENOMEM;
@@ -2908,6 +2939,9 @@ static void amd_iommu_get_resv_regions(struct device *dev,
return;
list_add_tail(&region->list, head);
+ if (amd_iommu_ht_range_ignore())
+ return;
+
region = iommu_alloc_resv_region(HT_RANGE_START,
HT_RANGE_END - HT_RANGE_START + 1,
0, IOMMU_RESV_RESERVED, GFP_KERNEL);
@@ -2984,38 +3018,6 @@ static const struct iommu_dirty_ops amd_dirty_ops = {
.read_and_clear_dirty = amd_iommu_read_and_clear_dirty,
};
-static int amd_iommu_dev_enable_feature(struct device *dev,
- enum iommu_dev_features feat)
-{
- int ret = 0;
-
- switch (feat) {
- case IOMMU_DEV_FEAT_IOPF:
- case IOMMU_DEV_FEAT_SVA:
- break;
- default:
- ret = -EINVAL;
- break;
- }
- return ret;
-}
-
-static int amd_iommu_dev_disable_feature(struct device *dev,
- enum iommu_dev_features feat)
-{
- int ret = 0;
-
- switch (feat) {
- case IOMMU_DEV_FEAT_IOPF:
- case IOMMU_DEV_FEAT_SVA:
- break;
- default:
- ret = -EINVAL;
- break;
- }
- return ret;
-}
-
const struct iommu_ops amd_iommu_ops = {
.capable = amd_iommu_capable,
.blocked_domain = &blocked_domain,
@@ -3029,8 +3031,6 @@ const struct iommu_ops amd_iommu_ops = {
.get_resv_regions = amd_iommu_get_resv_regions,
.is_attach_deferred = amd_iommu_is_attach_deferred,
.def_domain_type = amd_iommu_def_domain_type,
- .dev_enable_feat = amd_iommu_dev_enable_feature,
- .dev_disable_feat = amd_iommu_dev_disable_feature,
.page_response = amd_iommu_page_response,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = amd_iommu_attach_device,
@@ -3129,7 +3129,7 @@ static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid)
return table;
}
-static struct irq_remap_table *__alloc_irq_table(int nid, int order)
+static struct irq_remap_table *__alloc_irq_table(int nid, size_t size)
{
struct irq_remap_table *table;
@@ -3137,7 +3137,8 @@ static struct irq_remap_table *__alloc_irq_table(int nid, int order)
if (!table)
return NULL;
- table->table = iommu_alloc_pages_node(nid, GFP_KERNEL, order);
+ table->table = iommu_alloc_pages_node_sz(
+ nid, GFP_KERNEL, max(DTE_INTTAB_ALIGNMENT, size));
if (!table->table) {
kfree(table);
return NULL;
@@ -3191,7 +3192,6 @@ static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu,
struct irq_remap_table *new_table = NULL;
struct amd_iommu_pci_seg *pci_seg;
unsigned long flags;
- int order = get_order(get_irq_table_size(max_irqs));
int nid = iommu && iommu->dev ? dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE;
u16 alias;
@@ -3211,7 +3211,7 @@ static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu,
spin_unlock_irqrestore(&iommu_table_lock, flags);
/* Nothing there yet, allocate new irq remapping table */
- new_table = __alloc_irq_table(nid, order);
+ new_table = __alloc_irq_table(nid, get_irq_table_size(max_irqs));
if (!new_table)
return NULL;
@@ -3246,7 +3246,7 @@ out_unlock:
spin_unlock_irqrestore(&iommu_table_lock, flags);
if (new_table) {
- iommu_free_pages(new_table->table, order);
+ iommu_free_pages(new_table->table);
kfree(new_table);
}
return table;
diff --git a/drivers/iommu/amd/ppr.c b/drivers/iommu/amd/ppr.c
index 7c67d69f0b8c..e6767c057d01 100644
--- a/drivers/iommu/amd/ppr.c
+++ b/drivers/iommu/amd/ppr.c
@@ -48,7 +48,7 @@ void amd_iommu_enable_ppr_log(struct amd_iommu *iommu)
void __init amd_iommu_free_ppr_log(struct amd_iommu *iommu)
{
- iommu_free_pages(iommu->ppr_log, get_order(PPR_LOG_SIZE));
+ iommu_free_pages(iommu->ppr_log);
}
/*
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index e13501541fdd..757d24f67ad4 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -776,8 +776,7 @@ static void apple_dart_domain_free(struct iommu_domain *domain)
{
struct apple_dart_domain *dart_domain = to_dart_domain(domain);
- if (dart_domain->pgtbl_ops)
- free_io_pgtable_ops(dart_domain->pgtbl_ops);
+ free_io_pgtable_ops(dart_domain->pgtbl_ops);
kfree(dart_domain);
}
diff --git a/drivers/iommu/arm/Kconfig b/drivers/iommu/arm/Kconfig
new file mode 100644
index 000000000000..ef42bbe07dbe
--- /dev/null
+++ b/drivers/iommu/arm/Kconfig
@@ -0,0 +1,144 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# ARM IOMMU support
+config ARM_SMMU
+ tristate "ARM Ltd. System MMU (SMMU) Support"
+ depends on ARM64 || ARM || COMPILE_TEST
+ depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
+ select IOMMU_API
+ select IOMMU_IO_PGTABLE_LPAE
+ select ARM_DMA_USE_IOMMU if ARM
+ help
+ Support for implementations of the ARM System MMU architecture
+ versions 1 and 2.
+
+ Say Y here if your SoC includes an IOMMU device implementing
+ the ARM SMMU architecture.
+
+if ARM_SMMU
+config ARM_SMMU_LEGACY_DT_BINDINGS
+ bool "Support the legacy \"mmu-masters\" devicetree bindings"
+ depends on ARM_SMMU=y && OF
+ help
+ Support for the badly designed and deprecated "mmu-masters"
+ devicetree bindings. This allows some DMA masters to attach
+ to the SMMU but does not provide any support via the DMA API.
+ If you're lucky, you might be able to get VFIO up and running.
+
+ If you say Y here then you'll make me very sad. Instead, say N
+ and move your firmware to the utopian future that was 2016.
+
+config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT
+ bool "Disable unmatched stream bypass by default" if EXPERT
+ default y
+ help
+ If your firmware is broken and fails to describe StreamIDs which
+ Linux should know about in order to manage the SMMU correctly and
+ securely, and you don't want to boot with the 'arm-smmu.disable_bypass=0'
+ command line parameter, then as a last resort you can turn it off
+ by default here. But don't. This option may be removed at any time.
+
+ Note that 'arm-smmu.disable_bypass=1' will still take precedence.
+
+config ARM_SMMU_MMU_500_CPRE_ERRATA
+ bool "Enable errata workaround for CPRE in SMMU reset path"
+ default y
+ help
+ Say Y here (by default) to apply workaround to disable
+ MMU-500's next-page prefetcher for sake of 4 known errata.
+
+ Say N here only when it is sure that any errata related to
+ prefetch enablement are not applicable on the platform.
+ Refer silicon-errata.rst for info on errata IDs.
+
+config ARM_SMMU_QCOM
+ def_tristate y
+ depends on ARCH_QCOM
+ select QCOM_SCM
+ help
+ When running on a Qualcomm platform that has the custom variant
+ of the ARM SMMU, this needs to be built into the SMMU driver.
+
+config ARM_SMMU_QCOM_DEBUG
+ bool "ARM SMMU QCOM implementation defined debug support"
+ depends on ARM_SMMU_QCOM=y
+ help
+ Support for implementation specific debug features in ARM SMMU
+ hardware found in QTI platforms. This include support for
+ the Translation Buffer Units (TBU) that can be used to obtain
+ additional information when debugging memory management issues
+ like context faults.
+
+ Say Y here to enable debug for issues such as context faults
+ or TLB sync timeouts which requires implementation defined
+ register dumps.
+endif
+
+config ARM_SMMU_V3
+ tristate "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
+ depends on ARM64
+ select IOMMU_API
+ select IOMMU_IO_PGTABLE_LPAE
+ select GENERIC_MSI_IRQ
+ select IOMMUFD_DRIVER if IOMMUFD
+ help
+ Support for implementations of the ARM System MMU architecture
+ version 3 providing translation support to a PCIe root complex.
+
+ Say Y here if your system includes an IOMMU device implementing
+ the ARM SMMUv3 architecture.
+
+if ARM_SMMU_V3
+config ARM_SMMU_V3_SVA
+ bool "Shared Virtual Addressing support for the ARM SMMUv3"
+ select IOMMU_SVA
+ select IOMMU_IOPF
+ select MMU_NOTIFIER
+ help
+ Support for sharing process address spaces with devices using the
+ SMMUv3.
+
+ Say Y here if your system supports SVA extensions such as PCIe PASID
+ and PRI.
+
+config ARM_SMMU_V3_IOMMUFD
+ bool "Enable IOMMUFD features for ARM SMMUv3 (EXPERIMENTAL)"
+ depends on IOMMUFD
+ help
+ Support for IOMMUFD features intended to support virtual machines
+ with accelerated virtual IOMMUs.
+
+ Say Y here if you are doing development and testing on this feature.
+
+config ARM_SMMU_V3_KUNIT_TEST
+ tristate "KUnit tests for arm-smmu-v3 driver" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ depends on ARM_SMMU_V3_SVA
+ default KUNIT_ALL_TESTS
+ help
+ Enable this option to unit-test arm-smmu-v3 driver functions.
+
+ If unsure, say N.
+
+config TEGRA241_CMDQV
+ bool "NVIDIA Tegra241 CMDQ-V extension support for ARM SMMUv3"
+ depends on ACPI
+ help
+ Support for NVIDIA CMDQ-Virtualization extension for ARM SMMUv3. The
+ CMDQ-V extension is similar to v3.3 ECMDQ for multi command queues
+ support, except with virtualization capabilities.
+
+ Say Y here if your system is NVIDIA Tegra241 (Grace) or it has the same
+ CMDQ-V extension.
+endif
+
+config QCOM_IOMMU
+ # Note: iommu drivers cannot (yet?) be built as modules
+ bool "Qualcomm IOMMU Support"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
+ select QCOM_SCM
+ select IOMMU_API
+ select IOMMU_IO_PGTABLE_LPAE
+ select ARM_DMA_USE_IOMMU
+ help
+ Support for IOMMU on certain Qualcomm SoCs.
diff --git a/drivers/iommu/arm/Makefile b/drivers/iommu/arm/Makefile
index 0f9efeab709f..35a7e13eef34 100644
--- a/drivers/iommu/arm/Makefile
+++ b/drivers/iommu/arm/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
-obj-y += arm-smmu/ arm-smmu-v3/
+obj-y += arm-smmu/
+obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3/
diff --git a/drivers/iommu/arm/arm-smmu-v3/Makefile b/drivers/iommu/arm/arm-smmu-v3/Makefile
index 493a659cc66b..6cc7c8557b9e 100644
--- a/drivers/iommu/arm/arm-smmu-v3/Makefile
+++ b/drivers/iommu/arm/arm-smmu-v3/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_ARM_SMMU_V3) += arm_smmu_v3.o
+obj-y += arm_smmu_v3.o
arm_smmu_v3-y := arm-smmu-v3.o
arm_smmu_v3-$(CONFIG_ARM_SMMU_V3_IOMMUFD) += arm-smmu-v3-iommufd.o
arm_smmu_v3-$(CONFIG_ARM_SMMU_V3_SVA) += arm-smmu-v3-sva.o
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index 980cc6b33c43..0601dece0a0d 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -13,8 +13,6 @@
#include "arm-smmu-v3.h"
#include "../../io-pgtable-arm.h"
-static DEFINE_MUTEX(sva_lock);
-
static void __maybe_unused
arm_smmu_update_s1_domain_cd_entry(struct arm_smmu_domain *smmu_domain)
{
@@ -257,84 +255,6 @@ bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
return true;
}
-bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master)
-{
- /* We're not keeping track of SIDs in fault events */
- if (master->num_streams != 1)
- return false;
-
- return master->stall_enabled;
-}
-
-bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
-{
- if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
- return false;
-
- /* SSID support is mandatory for the moment */
- return master->ssid_bits;
-}
-
-bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
-{
- bool enabled;
-
- mutex_lock(&sva_lock);
- enabled = master->sva_enabled;
- mutex_unlock(&sva_lock);
- return enabled;
-}
-
-static int arm_smmu_master_sva_enable_iopf(struct arm_smmu_master *master)
-{
- struct device *dev = master->dev;
-
- /*
- * Drivers for devices supporting PRI or stall should enable IOPF first.
- * Others have device-specific fault handlers and don't need IOPF.
- */
- if (!arm_smmu_master_iopf_supported(master))
- return 0;
-
- if (!master->iopf_enabled)
- return -EINVAL;
-
- return iopf_queue_add_device(master->smmu->evtq.iopf, dev);
-}
-
-static void arm_smmu_master_sva_disable_iopf(struct arm_smmu_master *master)
-{
- struct device *dev = master->dev;
-
- if (!master->iopf_enabled)
- return;
-
- iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
-}
-
-int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
-{
- int ret;
-
- mutex_lock(&sva_lock);
- ret = arm_smmu_master_sva_enable_iopf(master);
- if (!ret)
- master->sva_enabled = true;
- mutex_unlock(&sva_lock);
-
- return ret;
-}
-
-int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
-{
- mutex_lock(&sva_lock);
- arm_smmu_master_sva_disable_iopf(master);
- master->sva_enabled = false;
- mutex_unlock(&sva_lock);
-
- return 0;
-}
-
void arm_smmu_sva_notifier_synchronize(void)
{
/*
@@ -353,6 +273,9 @@ static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
struct arm_smmu_cd target;
int ret;
+ if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
+ return -EOPNOTSUPP;
+
/* Prevent arm_smmu_mm_release from being called while we are attaching */
if (!mmget_not_zero(domain->mm))
return -EINVAL;
@@ -406,6 +329,9 @@ struct iommu_domain *arm_smmu_sva_domain_alloc(struct device *dev,
u32 asid;
int ret;
+ if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
+ return ERR_PTR(-EOPNOTSUPP);
+
smmu_domain = arm_smmu_domain_alloc();
if (IS_ERR(smmu_domain))
return ERR_CAST(smmu_domain);
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 48d910399a1b..10cc6dc26b7b 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -2720,6 +2720,7 @@ static void arm_smmu_disable_pasid(struct arm_smmu_master *master)
static struct arm_smmu_master_domain *
arm_smmu_find_master_domain(struct arm_smmu_domain *smmu_domain,
+ struct iommu_domain *domain,
struct arm_smmu_master *master,
ioasid_t ssid, bool nested_ats_flush)
{
@@ -2730,6 +2731,7 @@ arm_smmu_find_master_domain(struct arm_smmu_domain *smmu_domain,
list_for_each_entry(master_domain, &smmu_domain->devices,
devices_elm) {
if (master_domain->master == master &&
+ master_domain->domain == domain &&
master_domain->ssid == ssid &&
master_domain->nested_ats_flush == nested_ats_flush)
return master_domain;
@@ -2756,6 +2758,58 @@ to_smmu_domain_devices(struct iommu_domain *domain)
return NULL;
}
+static int arm_smmu_enable_iopf(struct arm_smmu_master *master,
+ struct arm_smmu_master_domain *master_domain)
+{
+ int ret;
+
+ iommu_group_mutex_assert(master->dev);
+
+ if (!IS_ENABLED(CONFIG_ARM_SMMU_V3_SVA))
+ return -EOPNOTSUPP;
+
+ /*
+	 * Drivers for devices supporting PRI or stall require IOPF; others have
+ * device-specific fault handlers and don't need IOPF, so this is not a
+ * failure.
+ */
+ if (!master->stall_enabled)
+ return 0;
+
+ /* We're not keeping track of SIDs in fault events */
+ if (master->num_streams != 1)
+ return -EOPNOTSUPP;
+
+ if (master->iopf_refcount) {
+ master->iopf_refcount++;
+ master_domain->using_iopf = true;
+ return 0;
+ }
+
+ ret = iopf_queue_add_device(master->smmu->evtq.iopf, master->dev);
+ if (ret)
+ return ret;
+ master->iopf_refcount = 1;
+ master_domain->using_iopf = true;
+ return 0;
+}
+
+static void arm_smmu_disable_iopf(struct arm_smmu_master *master,
+ struct arm_smmu_master_domain *master_domain)
+{
+ iommu_group_mutex_assert(master->dev);
+
+ if (!IS_ENABLED(CONFIG_ARM_SMMU_V3_SVA))
+ return;
+
+ if (!master_domain || !master_domain->using_iopf)
+ return;
+
+ master->iopf_refcount--;
+ if (master->iopf_refcount == 0)
+ iopf_queue_remove_device(master->smmu->evtq.iopf, master->dev);
+}
+
static void arm_smmu_remove_master_domain(struct arm_smmu_master *master,
struct iommu_domain *domain,
ioasid_t ssid)
@@ -2772,15 +2826,17 @@ static void arm_smmu_remove_master_domain(struct arm_smmu_master *master,
nested_ats_flush = to_smmu_nested_domain(domain)->enable_ats;
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
- master_domain = arm_smmu_find_master_domain(smmu_domain, master, ssid,
- nested_ats_flush);
+ master_domain = arm_smmu_find_master_domain(smmu_domain, domain, master,
+ ssid, nested_ats_flush);
if (master_domain) {
list_del(&master_domain->devices_elm);
- kfree(master_domain);
if (master->ats_enabled)
atomic_dec(&smmu_domain->nr_ats_masters);
}
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+
+ arm_smmu_disable_iopf(master, master_domain);
+ kfree(master_domain);
}
/*
@@ -2853,12 +2909,19 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
kfree(state->vmaster);
return -ENOMEM;
}
+ master_domain->domain = new_domain;
master_domain->master = master;
master_domain->ssid = state->ssid;
if (new_domain->type == IOMMU_DOMAIN_NESTED)
master_domain->nested_ats_flush =
to_smmu_nested_domain(new_domain)->enable_ats;
+ if (new_domain->iopf_handler) {
+ ret = arm_smmu_enable_iopf(master, master_domain);
+ if (ret)
+ goto err_free_master_domain;
+ }
+
/*
* During prepare we want the current smmu_domain and new
* smmu_domain to be in the devices list before we change any
@@ -2878,9 +2941,9 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
!arm_smmu_master_canwbs(master)) {
spin_unlock_irqrestore(&smmu_domain->devices_lock,
flags);
- kfree(master_domain);
kfree(state->vmaster);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_iopf;
}
if (state->ats_enabled)
@@ -2899,6 +2962,12 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
wmb();
}
return 0;
+
+err_iopf:
+ arm_smmu_disable_iopf(master, master_domain);
+err_free_master_domain:
+ kfree(master_domain);
+ return ret;
}
/*
@@ -2953,7 +3022,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
smmu = master->smmu;
if (smmu_domain->smmu != smmu)
- return ret;
+ return -EINVAL;
if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
cdptr = arm_smmu_alloc_cd_ptr(master, IOMMU_NO_PASID);
@@ -3510,8 +3579,7 @@ static void arm_smmu_release_device(struct device *dev)
{
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
- if (WARN_ON(arm_smmu_master_sva_enabled(master)))
- iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
+ WARN_ON(master->iopf_refcount);
/* Put the STE back to what arm_smmu_init_strtab() sets */
if (dev->iommu->require_direct)
@@ -3586,58 +3654,6 @@ static void arm_smmu_get_resv_regions(struct device *dev,
iommu_dma_get_resv_regions(dev, head);
}
-static int arm_smmu_dev_enable_feature(struct device *dev,
- enum iommu_dev_features feat)
-{
- struct arm_smmu_master *master = dev_iommu_priv_get(dev);
-
- if (!master)
- return -ENODEV;
-
- switch (feat) {
- case IOMMU_DEV_FEAT_IOPF:
- if (!arm_smmu_master_iopf_supported(master))
- return -EINVAL;
- if (master->iopf_enabled)
- return -EBUSY;
- master->iopf_enabled = true;
- return 0;
- case IOMMU_DEV_FEAT_SVA:
- if (!arm_smmu_master_sva_supported(master))
- return -EINVAL;
- if (arm_smmu_master_sva_enabled(master))
- return -EBUSY;
- return arm_smmu_master_enable_sva(master);
- default:
- return -EINVAL;
- }
-}
-
-static int arm_smmu_dev_disable_feature(struct device *dev,
- enum iommu_dev_features feat)
-{
- struct arm_smmu_master *master = dev_iommu_priv_get(dev);
-
- if (!master)
- return -EINVAL;
-
- switch (feat) {
- case IOMMU_DEV_FEAT_IOPF:
- if (!master->iopf_enabled)
- return -EINVAL;
- if (master->sva_enabled)
- return -EBUSY;
- master->iopf_enabled = false;
- return 0;
- case IOMMU_DEV_FEAT_SVA:
- if (!arm_smmu_master_sva_enabled(master))
- return -EINVAL;
- return arm_smmu_master_disable_sva(master);
- default:
- return -EINVAL;
- }
-}
-
/*
* HiSilicon PCIe tune and trace device can be used to trace TLP headers on the
* PCIe link and save the data to memory by DMA. The hardware is restricted to
@@ -3670,8 +3686,6 @@ static struct iommu_ops arm_smmu_ops = {
.device_group = arm_smmu_device_group,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
- .dev_enable_feat = arm_smmu_dev_enable_feature,
- .dev_disable_feat = arm_smmu_dev_disable_feature,
.page_response = arm_smmu_page_response,
.def_domain_type = arm_smmu_def_domain_type,
.viommu_alloc = arm_vsmmu_alloc,
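
The iopf_refcount conversion above replaces the per-master sva_enabled/iopf_enabled flags with a counter taken once per attached domain that needs I/O page faults; the real code relies on the iommu group mutex, asserted via iommu_group_mutex_assert(). A minimal sketch of the same refcounting idea using an explicit mutex, with hypothetical helpers (my_iopf_attach()/my_iopf_detach() stand in for iopf_queue_add_device()/iopf_queue_remove_device()):

	#include <linux/mutex.h>
	#include <linux/bug.h>

	/* Hypothetical per-device state; mirrors only the iopf_refcount idea. */
	struct my_master {
		struct mutex lock;		/* plays the role of the group mutex */
		unsigned int iopf_refcount;
	};

	int my_iopf_attach(struct my_master *m);
	void my_iopf_detach(struct my_master *m);

	static int my_iopf_get(struct my_master *m)
	{
		int ret = 0;

		mutex_lock(&m->lock);
		if (m->iopf_refcount++ == 0) {
			ret = my_iopf_attach(m);	/* first user does the real work */
			if (ret)
				m->iopf_refcount = 0;
		}
		mutex_unlock(&m->lock);
		return ret;
	}

	static void my_iopf_put(struct my_master *m)
	{
		mutex_lock(&m->lock);
		if (!WARN_ON(!m->iopf_refcount) && --m->iopf_refcount == 0)
			my_iopf_detach(m);		/* last user tears it down */
		mutex_unlock(&m->lock);
	}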
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index dd1ad56ce863..ea41d790463e 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -837,9 +837,8 @@ struct arm_smmu_master {
bool ats_enabled : 1;
bool ste_ats_enabled : 1;
bool stall_enabled;
- bool sva_enabled;
- bool iopf_enabled;
unsigned int ssid_bits;
+ unsigned int iopf_refcount;
};
/* SMMU private data for an IOMMU domain */
@@ -915,8 +914,14 @@ void arm_smmu_make_sva_cd(struct arm_smmu_cd *target,
struct arm_smmu_master_domain {
struct list_head devices_elm;
struct arm_smmu_master *master;
+ /*
+	 * For nested domains the master_domain is threaded onto the S2 parent;
+ * this points to the IOMMU_DOMAIN_NESTED to disambiguate the masters.
+ */
+ struct iommu_domain *domain;
ioasid_t ssid;
bool nested_ats_flush : 1;
+ bool using_iopf : 1;
};
static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
@@ -995,11 +1000,6 @@ int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
#ifdef CONFIG_ARM_SMMU_V3_SVA
bool arm_smmu_sva_supported(struct arm_smmu_device *smmu);
-bool arm_smmu_master_sva_supported(struct arm_smmu_master *master);
-bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master);
-int arm_smmu_master_enable_sva(struct arm_smmu_master *master);
-int arm_smmu_master_disable_sva(struct arm_smmu_master *master);
-bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master);
void arm_smmu_sva_notifier_synchronize(void);
struct iommu_domain *arm_smmu_sva_domain_alloc(struct device *dev,
struct mm_struct *mm);
@@ -1009,31 +1009,6 @@ static inline bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
return false;
}
-static inline bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
-{
- return false;
-}
-
-static inline bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
-{
- return false;
-}
-
-static inline int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
-{
- return -ENODEV;
-}
-
-static inline int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
-{
- return -ENODEV;
-}
-
-static inline bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master)
-{
- return false;
-}
-
static inline void arm_smmu_sva_notifier_synchronize(void) {}
#define arm_smmu_sva_domain_alloc NULL
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
index d03b2239baad..65e0ef6539fe 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
@@ -406,6 +406,12 @@ irqreturn_t qcom_smmu_context_fault(int irq, void *dev)
arm_smmu_print_context_fault_info(smmu, idx, &cfi);
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, cfi.fsr);
+
+ if (cfi.fsr & ARM_SMMU_CB_FSR_SS) {
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME,
+ ret == -EAGAIN ? 0 : ARM_SMMU_RESUME_TERMINATE);
+ }
+
return IRQ_HANDLED;
}
@@ -416,6 +422,9 @@ irqreturn_t qcom_smmu_context_fault(int irq, void *dev)
if (!tmp || tmp == -EBUSY) {
ret = IRQ_HANDLED;
resume = ARM_SMMU_RESUME_TERMINATE;
+ } else if (tmp == -EAGAIN) {
+ ret = IRQ_HANDLED;
+ resume = 0;
} else {
phys_addr_t phys_atos = qcom_smmu_verify_fault(smmu_domain, cfi.iova, cfi.fsr);
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 59d02687280e..62874b18f645 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -112,25 +112,39 @@ static void qcom_adreno_smmu_set_stall(const void *cookie, bool enabled)
{
struct arm_smmu_domain *smmu_domain = (void *)cookie;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
- struct qcom_smmu *qsmmu = to_qcom_smmu(smmu_domain->smmu);
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
+ u32 mask = BIT(cfg->cbndx);
+ bool stall_changed = !!(qsmmu->stall_enabled & mask) != enabled;
+ unsigned long flags;
if (enabled)
- qsmmu->stall_enabled |= BIT(cfg->cbndx);
+ qsmmu->stall_enabled |= mask;
else
- qsmmu->stall_enabled &= ~BIT(cfg->cbndx);
-}
+ qsmmu->stall_enabled &= ~mask;
-static void qcom_adreno_smmu_resume_translation(const void *cookie, bool terminate)
-{
- struct arm_smmu_domain *smmu_domain = (void *)cookie;
- struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
- u32 reg = 0;
+ /*
+ * If the device is on and we changed the setting, update the register.
+ * The spec pseudocode says that CFCFG is resampled after a fault, and
+ * we believe that no implementations cache it in the TLB, so it should
+ * be safe to change it without a TLB invalidation.
+ */
+ if (stall_changed && pm_runtime_get_if_active(smmu->dev) > 0) {
+ u32 reg;
+
+ spin_lock_irqsave(&smmu_domain->cb_lock, flags);
+ reg = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_SCTLR);
+
+ if (enabled)
+ reg |= ARM_SMMU_SCTLR_CFCFG;
+ else
+ reg &= ~ARM_SMMU_SCTLR_CFCFG;
- if (terminate)
- reg |= ARM_SMMU_RESUME_TERMINATE;
+ arm_smmu_cb_write(smmu, cfg->cbndx, ARM_SMMU_CB_SCTLR, reg);
+ spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
- arm_smmu_cb_write(smmu, cfg->cbndx, ARM_SMMU_CB_RESUME, reg);
+ pm_runtime_put_autosuspend(smmu->dev);
+ }
}
static void qcom_adreno_smmu_set_prr_bit(const void *cookie, bool set)
@@ -337,7 +351,6 @@ static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
priv->set_ttbr0_cfg = qcom_adreno_smmu_set_ttbr0_cfg;
priv->get_fault_info = qcom_adreno_smmu_get_fault_info;
priv->set_stall = qcom_adreno_smmu_set_stall;
- priv->resume_translation = qcom_adreno_smmu_resume_translation;
priv->set_prr_bit = NULL;
priv->set_prr_addr = NULL;
@@ -356,6 +369,7 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
{ .compatible = "qcom,mdp4" },
{ .compatible = "qcom,mdss" },
{ .compatible = "qcom,qcm2290-mdss" },
+ { .compatible = "qcom,sar2130p-mdss" },
{ .compatible = "qcom,sc7180-mdss" },
{ .compatible = "qcom,sc7180-mss-pil" },
{ .compatible = "qcom,sc7280-mdss" },
@@ -585,6 +599,7 @@ static const struct arm_smmu_impl qcom_adreno_smmu_v2_impl = {
.alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
.write_sctlr = qcom_adreno_smmu_write_sctlr,
.tlb_sync = qcom_smmu_tlb_sync,
+ .context_fault_needs_threaded_irq = true,
};
static const struct arm_smmu_impl qcom_adreno_smmu_500_impl = {
@@ -594,6 +609,7 @@ static const struct arm_smmu_impl qcom_adreno_smmu_500_impl = {
.alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
.write_sctlr = qcom_adreno_smmu_write_sctlr,
.tlb_sync = qcom_smmu_tlb_sync,
+ .context_fault_needs_threaded_irq = true,
};
static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
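
The reworked qcom_adreno_smmu_set_stall() above only touches SCTLR when the SMMU is already powered, using pm_runtime_get_if_active() so that flipping CFCFG never forces a resume. A minimal sketch of that "update hardware only if already on" pattern, mirroring the single-argument pm_runtime_get_if_active() call used above (struct my_dev and my_write_hw() are hypothetical stand-ins for the SMMU context-bank accessors):

	#include <linux/pm_runtime.h>
	#include <linux/spinlock.h>
	#include <linux/bits.h>
	#include <linux/types.h>

	struct my_dev {
		struct device *dev;
		spinlock_t lock;
		u32 shadow;		/* software copy of the register */
	};

	/* Hypothetical register writer; stands in for arm_smmu_cb_write(). */
	void my_write_hw(struct my_dev *md, u32 val);

	static void my_dev_set_flag(struct my_dev *md, bool on)
	{
		unsigned long flags;

		spin_lock_irqsave(&md->lock, flags);
		md->shadow = on ? (md->shadow | BIT(0)) : (md->shadow & ~BIT(0));
		spin_unlock_irqrestore(&md->lock, flags);

		/*
		 * Returns > 0 only if the device is runtime-active; in that case it
		 * also takes a usage count, so the hardware cannot suspend under us.
		 */
		if (pm_runtime_get_if_active(md->dev) > 0) {
			my_write_hw(md, md->shadow);
			pm_runtime_put_autosuspend(md->dev);
		}
	}

If the device is suspended, only the software shadow changes and the register is reprogrammed on the next resume path, which is the behaviour the hunk above relies on.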
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index 8f439c265a23..8d95b14c7d5a 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -474,6 +474,12 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
arm_smmu_print_context_fault_info(smmu, idx, &cfi);
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, cfi.fsr);
+
+ if (cfi.fsr & ARM_SMMU_CB_FSR_SS) {
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME,
+ ret == -EAGAIN ? 0 : ARM_SMMU_RESUME_TERMINATE);
+ }
+
return IRQ_HANDLED;
}
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 98f7205ec8fb..6c708fec48d1 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -106,7 +106,7 @@ early_param("iommu.forcedac", iommu_dma_forcedac_setup);
struct iova_fq_entry {
unsigned long iova_pfn;
unsigned long pages;
- struct list_head freelist;
+ struct iommu_pages_list freelist;
u64 counter; /* Flush counter when this entry was added */
};
@@ -155,6 +155,8 @@ static void fq_ring_free_locked(struct iommu_dma_cookie *cookie, struct iova_fq
fq->entries[idx].iova_pfn,
fq->entries[idx].pages);
+ fq->entries[idx].freelist =
+ IOMMU_PAGES_LIST_INIT(fq->entries[idx].freelist);
fq->head = (fq->head + 1) & fq->mod_mask;
}
}
@@ -193,7 +195,7 @@ static void fq_flush_timeout(struct timer_list *t)
static void queue_iova(struct iommu_dma_cookie *cookie,
unsigned long pfn, unsigned long pages,
- struct list_head *freelist)
+ struct iommu_pages_list *freelist)
{
struct iova_fq *fq;
unsigned long flags;
@@ -232,7 +234,7 @@ static void queue_iova(struct iommu_dma_cookie *cookie,
fq->entries[idx].iova_pfn = pfn;
fq->entries[idx].pages = pages;
fq->entries[idx].counter = atomic64_read(&cookie->fq_flush_start_cnt);
- list_splice(freelist, &fq->entries[idx].freelist);
+ iommu_pages_list_splice(freelist, &fq->entries[idx].freelist);
spin_unlock_irqrestore(&fq->lock, flags);
@@ -290,7 +292,8 @@ static void iommu_dma_init_one_fq(struct iova_fq *fq, size_t fq_size)
spin_lock_init(&fq->lock);
for (i = 0; i < fq_size; i++)
- INIT_LIST_HEAD(&fq->entries[i].freelist);
+ fq->entries[i].freelist =
+ IOMMU_PAGES_LIST_INIT(fq->entries[i].freelist);
}
static int iommu_dma_init_fq_single(struct iommu_dma_cookie *cookie)
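
The dma-iommu.c changes above switch the flush-queue freelist from a struct page lru list to the iommu_pages_list type introduced by this series, so freed page-table memory is tracked by virtual address. A minimal sketch of the deferred-free flow under those helpers, assuming the IOMMU_PAGES_LIST_INIT()/iommu_pages_list_add()/iommu_put_pages_list() interfaces shown in this patch (struct my_domain and my_flush_iotlb() are hypothetical):

	struct my_domain;
	void my_flush_iotlb(struct my_domain *dom);

	/*
	 * Collect page-table pages freed by an unmap and release them only after
	 * the IOTLB has been invalidated; the hardware may still be walking them
	 * until the flush completes.
	 */
	static void my_unmap_and_free(struct my_domain *dom, void *table1, void *table2)
	{
		struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist);

		iommu_pages_list_add(&freelist, table1);
		iommu_pages_list_add(&freelist, table2);

		my_flush_iotlb(dom);			/* hypothetical TLB invalidation */
		iommu_put_pages_list(&freelist);	/* now it is safe to free the pages */
	}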
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 317266aca6e2..fcb6a0f7c082 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -902,11 +902,11 @@ static struct iommu_domain *exynos_iommu_domain_alloc_paging(struct device *dev)
if (!domain)
return NULL;
- domain->pgtable = iommu_alloc_pages(GFP_KERNEL, 2);
+ domain->pgtable = iommu_alloc_pages_sz(GFP_KERNEL, SZ_16K);
if (!domain->pgtable)
goto err_pgtable;
- domain->lv2entcnt = iommu_alloc_pages(GFP_KERNEL, 1);
+ domain->lv2entcnt = iommu_alloc_pages_sz(GFP_KERNEL, SZ_8K);
if (!domain->lv2entcnt)
goto err_counter;
@@ -932,9 +932,9 @@ static struct iommu_domain *exynos_iommu_domain_alloc_paging(struct device *dev)
return &domain->domain;
err_lv2ent:
- iommu_free_pages(domain->lv2entcnt, 1);
+ iommu_free_pages(domain->lv2entcnt);
err_counter:
- iommu_free_pages(domain->pgtable, 2);
+ iommu_free_pages(domain->pgtable);
err_pgtable:
kfree(domain);
return NULL;
@@ -975,8 +975,8 @@ static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
phys_to_virt(base));
}
- iommu_free_pages(domain->pgtable, 2);
- iommu_free_pages(domain->lv2entcnt, 1);
+ iommu_free_pages(domain->pgtable);
+ iommu_free_pages(domain->lv2entcnt);
kfree(domain);
}
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index 30be786bff11..5f08523f97cb 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -64,7 +64,7 @@ static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
spin_lock_irqsave(&iommu_lock, flags);
ret = pamu_update_paace_stash(liodn, val);
if (ret) {
- pr_debug("Failed to update SPAACE for liodn %d\n ", liodn);
+ pr_debug("Failed to update SPAACE for liodn %d\n", liodn);
spin_unlock_irqrestore(&iommu_lock, flags);
return ret;
}
diff --git a/drivers/iommu/intel/Makefile b/drivers/iommu/intel/Makefile
index 6c7528130cf9..ada651c4a01b 100644
--- a/drivers/iommu/intel/Makefile
+++ b/drivers/iommu/intel/Makefile
@@ -1,11 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_DMAR_TABLE) += dmar.o
-obj-$(CONFIG_INTEL_IOMMU) += iommu.o pasid.o nested.o cache.o prq.o
-obj-$(CONFIG_DMAR_TABLE) += trace.o
+obj-y += iommu.o pasid.o nested.o cache.o prq.o
+obj-$(CONFIG_DMAR_TABLE) += dmar.o trace.o
obj-$(CONFIG_DMAR_PERF) += perf.o
obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += debugfs.o
obj-$(CONFIG_INTEL_IOMMU_SVM) += svm.o
-ifdef CONFIG_INTEL_IOMMU
obj-$(CONFIG_IRQ_REMAP) += irq_remapping.o
-endif
obj-$(CONFIG_INTEL_IOMMU_PERF_EVENTS) += perfmon.o
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index e540092d664d..b61d9ea27aa9 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -1099,6 +1099,9 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
spin_lock_init(&iommu->device_rbtree_lock);
mutex_init(&iommu->iopf_lock);
iommu->node = NUMA_NO_NODE;
+ spin_lock_init(&iommu->lock);
+ ida_init(&iommu->domain_ida);
+ mutex_init(&iommu->did_lock);
ver = readl(iommu->reg + DMAR_VER_REG);
pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
@@ -1187,7 +1190,7 @@ static void free_iommu(struct intel_iommu *iommu)
}
if (iommu->qi) {
- iommu_free_page(iommu->qi->desc);
+ iommu_free_pages(iommu->qi->desc);
kfree(iommu->qi->desc_status);
kfree(iommu->qi);
}
@@ -1195,6 +1198,7 @@ static void free_iommu(struct intel_iommu *iommu)
if (iommu->reg)
unmap_iommu(iommu);
+ ida_destroy(&iommu->domain_ida);
ida_free(&dmar_seq_ids, iommu->seq_id);
kfree(iommu);
}
@@ -1681,7 +1685,6 @@ int dmar_enable_qi(struct intel_iommu *iommu)
{
struct q_inval *qi;
void *desc;
- int order;
if (!ecap_qis(iommu->ecap))
return -ENOENT;
@@ -1702,8 +1705,9 @@ int dmar_enable_qi(struct intel_iommu *iommu)
* Need two pages to accommodate 256 descriptors of 256 bits each
* if the remapping hardware supports scalable mode translation.
*/
- order = ecap_smts(iommu->ecap) ? 1 : 0;
- desc = iommu_alloc_pages_node(iommu->node, GFP_ATOMIC, order);
+ desc = iommu_alloc_pages_node_sz(iommu->node, GFP_ATOMIC,
+ ecap_smts(iommu->ecap) ? SZ_8K :
+ SZ_4K);
if (!desc) {
kfree(qi);
iommu->qi = NULL;
@@ -1714,7 +1718,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
qi->desc_status = kcalloc(QI_LENGTH, sizeof(int), GFP_ATOMIC);
if (!qi->desc_status) {
- iommu_free_page(qi->desc);
+ iommu_free_pages(qi->desc);
kfree(qi);
iommu->qi = NULL;
return -ENOMEM;
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index cb0b993bebb4..7aa3932251b2 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -397,7 +397,8 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
if (!alloc)
return NULL;
- context = iommu_alloc_page_node(iommu->node, GFP_ATOMIC);
+ context = iommu_alloc_pages_node_sz(iommu->node, GFP_ATOMIC,
+ SZ_4K);
if (!context)
return NULL;
@@ -571,17 +572,17 @@ static void free_context_table(struct intel_iommu *iommu)
for (i = 0; i < ROOT_ENTRY_NR; i++) {
context = iommu_context_addr(iommu, i, 0, 0);
if (context)
- iommu_free_page(context);
+ iommu_free_pages(context);
if (!sm_supported(iommu))
continue;
context = iommu_context_addr(iommu, i, 0x80, 0);
if (context)
- iommu_free_page(context);
+ iommu_free_pages(context);
}
- iommu_free_page(iommu->root_entry);
+ iommu_free_pages(iommu->root_entry);
iommu->root_entry = NULL;
}
@@ -731,7 +732,8 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
if (!dma_pte_present(pte)) {
uint64_t pteval, tmp;
- tmp_page = iommu_alloc_page_node(domain->nid, gfp);
+ tmp_page = iommu_alloc_pages_node_sz(domain->nid, gfp,
+ SZ_4K);
if (!tmp_page)
return NULL;
@@ -745,7 +747,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
tmp = 0ULL;
if (!try_cmpxchg64(&pte->val, &tmp, pteval))
/* Someone else set it while we were thinking; use theirs. */
- iommu_free_page(tmp_page);
+ iommu_free_pages(tmp_page);
else
domain_flush_cache(domain, pte, sizeof(*pte));
}
@@ -858,7 +860,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level,
last_pfn < level_pfn + level_size(level) - 1)) {
dma_clear_pte(pte);
domain_flush_cache(domain, pte, sizeof(*pte));
- iommu_free_page(level_pte);
+ iommu_free_pages(level_pte);
}
next:
pfn += level_size(level);
@@ -882,7 +884,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
/* free pgd */
if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
- iommu_free_page(domain->pgd);
+ iommu_free_pages(domain->pgd);
domain->pgd = NULL;
}
}
@@ -894,18 +896,16 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
The 'pte' argument is the *parent* PTE, pointing to the page that is to
be freed. */
static void dma_pte_list_pagetables(struct dmar_domain *domain,
- int level, struct dma_pte *pte,
- struct list_head *freelist)
+ int level, struct dma_pte *parent_pte,
+ struct iommu_pages_list *freelist)
{
- struct page *pg;
+ struct dma_pte *pte = phys_to_virt(dma_pte_addr(parent_pte));
- pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
- list_add_tail(&pg->lru, freelist);
+ iommu_pages_list_add(freelist, pte);
if (level == 1)
return;
- pte = page_address(pg);
do {
if (dma_pte_present(pte) && !dma_pte_superpage(pte))
dma_pte_list_pagetables(domain, level - 1, pte, freelist);
@@ -916,7 +916,7 @@ static void dma_pte_list_pagetables(struct dmar_domain *domain,
static void dma_pte_clear_level(struct dmar_domain *domain, int level,
struct dma_pte *pte, unsigned long pfn,
unsigned long start_pfn, unsigned long last_pfn,
- struct list_head *freelist)
+ struct iommu_pages_list *freelist)
{
struct dma_pte *first_pte = NULL, *last_pte = NULL;
@@ -961,7 +961,8 @@ next:
the page tables, and may have cached the intermediate levels. The
pages can only be freed after the IOTLB flush has been done. */
static void domain_unmap(struct dmar_domain *domain, unsigned long start_pfn,
- unsigned long last_pfn, struct list_head *freelist)
+ unsigned long last_pfn,
+ struct iommu_pages_list *freelist)
{
if (WARN_ON(!domain_pfn_supported(domain, last_pfn)) ||
WARN_ON(start_pfn > last_pfn))
@@ -973,8 +974,7 @@ static void domain_unmap(struct dmar_domain *domain, unsigned long start_pfn,
/* free pgd */
if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
- struct page *pgd_page = virt_to_page(domain->pgd);
- list_add_tail(&pgd_page->lru, freelist);
+ iommu_pages_list_add(freelist, domain->pgd);
domain->pgd = NULL;
}
}
@@ -984,7 +984,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
struct root_entry *root;
- root = iommu_alloc_page_node(iommu->node, GFP_ATOMIC);
+ root = iommu_alloc_pages_node_sz(iommu->node, GFP_ATOMIC, SZ_4K);
if (!root) {
pr_err("Allocating root entry for %s failed\n",
iommu->name);
@@ -1289,52 +1289,13 @@ static void iommu_disable_translation(struct intel_iommu *iommu)
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
-static int iommu_init_domains(struct intel_iommu *iommu)
-{
- u32 ndomains;
-
- ndomains = cap_ndoms(iommu->cap);
- pr_debug("%s: Number of Domains supported <%d>\n",
- iommu->name, ndomains);
-
- spin_lock_init(&iommu->lock);
-
- iommu->domain_ids = bitmap_zalloc(ndomains, GFP_KERNEL);
- if (!iommu->domain_ids)
- return -ENOMEM;
-
- /*
- * If Caching mode is set, then invalid translations are tagged
- * with domain-id 0, hence we need to pre-allocate it. We also
- * use domain-id 0 as a marker for non-allocated domain-id, so
- * make sure it is not used for a real domain.
- */
- set_bit(0, iommu->domain_ids);
-
- /*
- * Vt-d spec rev3.0 (section 6.2.3.1) requires that each pasid
- * entry for first-level or pass-through translation modes should
- * be programmed with a domain id different from those used for
- * second-level or nested translation. We reserve a domain id for
- * this purpose. This domain id is also used for identity domain
- * in legacy mode.
- */
- set_bit(FLPT_DEFAULT_DID, iommu->domain_ids);
-
- return 0;
-}
-
static void disable_dmar_iommu(struct intel_iommu *iommu)
{
- if (!iommu->domain_ids)
- return;
-
/*
* All iommu domains must have been detached from the devices,
* hence there should be no domain IDs in use.
*/
- if (WARN_ON(bitmap_weight(iommu->domain_ids, cap_ndoms(iommu->cap))
- > NUM_RESERVED_DID))
+ if (WARN_ON(!ida_is_empty(&iommu->domain_ida)))
return;
if (iommu->gcmd & DMA_GCMD_TE)
@@ -1343,11 +1304,6 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
static void free_dmar_iommu(struct intel_iommu *iommu)
{
- if (iommu->domain_ids) {
- bitmap_free(iommu->domain_ids);
- iommu->domain_ids = NULL;
- }
-
if (iommu->copied_tables) {
bitmap_free(iommu->copied_tables);
iommu->copied_tables = NULL;
@@ -1380,7 +1336,6 @@ static bool first_level_by_default(struct intel_iommu *iommu)
int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
{
struct iommu_domain_info *info, *curr;
- unsigned long ndomains;
int num, ret = -ENOSPC;
if (domain->domain.type == IOMMU_DOMAIN_SVA)
@@ -1390,40 +1345,36 @@ int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
if (!info)
return -ENOMEM;
- spin_lock(&iommu->lock);
+ guard(mutex)(&iommu->did_lock);
curr = xa_load(&domain->iommu_array, iommu->seq_id);
if (curr) {
curr->refcnt++;
- spin_unlock(&iommu->lock);
kfree(info);
return 0;
}
- ndomains = cap_ndoms(iommu->cap);
- num = find_first_zero_bit(iommu->domain_ids, ndomains);
- if (num >= ndomains) {
+ num = ida_alloc_range(&iommu->domain_ida, IDA_START_DID,
+ cap_ndoms(iommu->cap) - 1, GFP_KERNEL);
+ if (num < 0) {
pr_err("%s: No free domain ids\n", iommu->name);
goto err_unlock;
}
- set_bit(num, iommu->domain_ids);
info->refcnt = 1;
info->did = num;
info->iommu = iommu;
curr = xa_cmpxchg(&domain->iommu_array, iommu->seq_id,
- NULL, info, GFP_ATOMIC);
+ NULL, info, GFP_KERNEL);
if (curr) {
ret = xa_err(curr) ? : -EBUSY;
goto err_clear;
}
- spin_unlock(&iommu->lock);
return 0;
err_clear:
- clear_bit(info->did, iommu->domain_ids);
+ ida_free(&iommu->domain_ida, info->did);
err_unlock:
- spin_unlock(&iommu->lock);
kfree(info);
return ret;
}
@@ -1435,21 +1386,21 @@ void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
if (domain->domain.type == IOMMU_DOMAIN_SVA)
return;
- spin_lock(&iommu->lock);
+ guard(mutex)(&iommu->did_lock);
info = xa_load(&domain->iommu_array, iommu->seq_id);
if (--info->refcnt == 0) {
- clear_bit(info->did, iommu->domain_ids);
+ ida_free(&iommu->domain_ida, info->did);
xa_erase(&domain->iommu_array, iommu->seq_id);
domain->nid = NUMA_NO_NODE;
kfree(info);
}
- spin_unlock(&iommu->lock);
}
static void domain_exit(struct dmar_domain *domain)
{
if (domain->pgd) {
- LIST_HEAD(freelist);
+ struct iommu_pages_list freelist =
+ IOMMU_PAGES_LIST_INIT(freelist);
domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw), &freelist);
iommu_put_pages_list(&freelist);
@@ -1681,9 +1632,8 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
}
attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
- attr |= DMA_FL_PTE_PRESENT;
if (domain->use_first_level) {
- attr |= DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
+ attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
if (prot & DMA_PTE_WRITE)
attr |= DMA_FL_PTE_DIRTY;
}
@@ -1859,6 +1809,7 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
return ret;
info->domain = domain;
+ info->domain_attached = true;
spin_lock_irqsave(&domain->lock, flags);
list_add(&info->link, &domain->devices);
spin_unlock_irqrestore(&domain->lock, flags);
@@ -2027,7 +1978,8 @@ static int copy_context_table(struct intel_iommu *iommu,
if (!old_ce)
goto out;
- new_ce = iommu_alloc_page_node(iommu->node, GFP_KERNEL);
+ new_ce = iommu_alloc_pages_node_sz(iommu->node,
+ GFP_KERNEL, SZ_4K);
if (!new_ce)
goto out_unmap;
@@ -2042,7 +1994,7 @@ static int copy_context_table(struct intel_iommu *iommu,
did = context_domain_id(&ce);
if (did >= 0 && did < cap_ndoms(iommu->cap))
- set_bit(did, iommu->domain_ids);
+ ida_alloc_range(&iommu->domain_ida, did, did, GFP_KERNEL);
set_context_copied(iommu, bus, devfn);
new_ce[idx] = ce;
@@ -2169,11 +2121,6 @@ static int __init init_dmars(void)
}
intel_iommu_init_qi(iommu);
-
- ret = iommu_init_domains(iommu);
- if (ret)
- goto free_iommu;
-
init_translation_status(iommu);
if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
@@ -2651,9 +2598,7 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
if (iommu->gcmd & DMA_GCMD_TE)
iommu_disable_translation(iommu);
- ret = iommu_init_domains(iommu);
- if (ret == 0)
- ret = iommu_alloc_root_entry(iommu);
+ ret = iommu_alloc_root_entry(iommu);
if (ret)
goto out;
@@ -2744,7 +2689,6 @@ static struct dmar_satc_unit *dmar_find_matched_satc_unit(struct pci_dev *dev)
struct device *tmp;
int i;
- dev = pci_physfn(dev);
rcu_read_lock();
list_for_each_entry_rcu(satcu, &dmar_satc_units, list) {
@@ -2761,15 +2705,16 @@ out:
return satcu;
}
-static int dmar_ats_supported(struct pci_dev *dev, struct intel_iommu *iommu)
+static bool dmar_ats_supported(struct pci_dev *dev, struct intel_iommu *iommu)
{
- int i, ret = 1;
- struct pci_bus *bus;
struct pci_dev *bridge = NULL;
- struct device *tmp;
- struct acpi_dmar_atsr *atsr;
struct dmar_atsr_unit *atsru;
struct dmar_satc_unit *satcu;
+ struct acpi_dmar_atsr *atsr;
+ bool supported = true;
+ struct pci_bus *bus;
+ struct device *tmp;
+ int i;
dev = pci_physfn(dev);
satcu = dmar_find_matched_satc_unit(dev);
@@ -2787,11 +2732,11 @@ static int dmar_ats_supported(struct pci_dev *dev, struct intel_iommu *iommu)
bridge = bus->self;
/* If it's an integrated device, allow ATS */
if (!bridge)
- return 1;
+ return true;
/* Connected via non-PCIe: no ATS */
if (!pci_is_pcie(bridge) ||
pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
- return 0;
+ return false;
/* If we found the root port, look it up in the ATSR */
if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
break;
@@ -2810,11 +2755,11 @@ static int dmar_ats_supported(struct pci_dev *dev, struct intel_iommu *iommu)
if (atsru->include_all)
goto out;
}
- ret = 0;
+ supported = false;
out:
rcu_read_unlock();
- return ret;
+ return supported;
}
int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
@@ -2972,9 +2917,14 @@ static ssize_t domains_used_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct intel_iommu *iommu = dev_to_intel_iommu(dev);
- return sysfs_emit(buf, "%d\n",
- bitmap_weight(iommu->domain_ids,
- cap_ndoms(iommu->cap)));
+ unsigned int count = 0;
+ int id;
+
+ for (id = 0; id < cap_ndoms(iommu->cap); id++)
+ if (ida_exists(&iommu->domain_ida, id))
+ count++;
+
+ return sysfs_emit(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(domains_used);
@@ -3257,6 +3207,10 @@ void device_block_translation(struct device *dev)
struct intel_iommu *iommu = info->iommu;
unsigned long flags;
+	/* Device in DMA blocking state. Nothing to do. */
+ if (!info->domain_attached)
+ return;
+
if (info->domain)
cache_tag_unassign_domain(info->domain, dev, IOMMU_NO_PASID);
@@ -3268,6 +3222,9 @@ void device_block_translation(struct device *dev)
domain_context_clear(info);
}
+ /* Device now in DMA blocking state. */
+ info->domain_attached = false;
+
if (!info->domain)
return;
@@ -3282,6 +3239,9 @@ void device_block_translation(struct device *dev)
static int blocking_domain_attach_dev(struct iommu_domain *domain,
struct device *dev)
{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+
+ iopf_for_domain_remove(info->domain ? &info->domain->domain : NULL, dev);
device_block_translation(dev);
return 0;
}
@@ -3360,7 +3320,7 @@ static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_st
domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);
/* always allocate the top pgd */
- domain->pgd = iommu_alloc_page_node(domain->nid, GFP_KERNEL);
+ domain->pgd = iommu_alloc_pages_node_sz(domain->nid, GFP_KERNEL, SZ_4K);
if (!domain->pgd) {
kfree(domain);
return ERR_PTR(-ENOMEM);
@@ -3492,7 +3452,15 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
if (ret)
return ret;
- return dmar_domain_attach_device(to_dmar_domain(domain), dev);
+ ret = iopf_for_domain_set(domain, dev);
+ if (ret)
+ return ret;
+
+ ret = dmar_domain_attach_device(to_dmar_domain(domain), dev);
+ if (ret)
+ iopf_for_domain_remove(domain, dev);
+
+ return ret;
}
static int intel_iommu_map(struct iommu_domain *domain,
@@ -3603,7 +3571,8 @@ static void intel_iommu_tlb_sync(struct iommu_domain *domain,
struct iommu_iotlb_gather *gather)
{
cache_tag_flush_range(to_dmar_domain(domain), gather->start,
- gather->end, list_empty(&gather->freelist));
+ gather->end,
+ iommu_pages_list_empty(&gather->freelist));
iommu_put_pages_list(&gather->freelist);
}
@@ -3918,6 +3887,8 @@ int intel_iommu_enable_iopf(struct device *dev)
if (!info->pri_enabled)
return -ENODEV;
+ /* pri_enabled is protected by the group mutex. */
+ iommu_group_mutex_assert(dev);
if (info->iopf_refcount) {
info->iopf_refcount++;
return 0;
@@ -3940,43 +3911,13 @@ void intel_iommu_disable_iopf(struct device *dev)
if (WARN_ON(!info->pri_enabled || !info->iopf_refcount))
return;
+ iommu_group_mutex_assert(dev);
if (--info->iopf_refcount)
return;
iopf_queue_remove_device(iommu->iopf_queue, dev);
}
-static int
-intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
-{
- switch (feat) {
- case IOMMU_DEV_FEAT_IOPF:
- return intel_iommu_enable_iopf(dev);
-
- case IOMMU_DEV_FEAT_SVA:
- return 0;
-
- default:
- return -ENODEV;
- }
-}
-
-static int
-intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
-{
- switch (feat) {
- case IOMMU_DEV_FEAT_IOPF:
- intel_iommu_disable_iopf(dev);
- return 0;
-
- case IOMMU_DEV_FEAT_SVA:
- return 0;
-
- default:
- return -ENODEV;
- }
-}
-
static bool intel_iommu_is_attach_deferred(struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
@@ -4050,6 +3991,7 @@ static int blocking_domain_set_dev_pasid(struct iommu_domain *domain,
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
+ iopf_for_domain_remove(old, dev);
intel_pasid_tear_down_entry(info->iommu, dev, pasid, false);
domain_remove_dev_pasid(old, dev, pasid);
@@ -4123,6 +4065,10 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
if (IS_ERR(dev_pasid))
return PTR_ERR(dev_pasid);
+ ret = iopf_for_domain_replace(domain, old, dev);
+ if (ret)
+ goto out_remove_dev_pasid;
+
if (dmar_domain->use_first_level)
ret = domain_setup_first_level(iommu, dmar_domain,
dev, pasid, old);
@@ -4130,7 +4076,7 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
ret = domain_setup_second_level(iommu, dmar_domain,
dev, pasid, old);
if (ret)
- goto out_remove_dev_pasid;
+ goto out_unwind_iopf;
domain_remove_dev_pasid(old, dev, pasid);
@@ -4138,6 +4084,8 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
return 0;
+out_unwind_iopf:
+ iopf_for_domain_replace(old, domain, dev);
out_remove_dev_pasid:
domain_remove_dev_pasid(domain, dev, pasid);
return ret;
@@ -4352,11 +4300,19 @@ static int identity_domain_attach_dev(struct iommu_domain *domain, struct device
if (dev_is_real_dma_subdevice(dev))
return 0;
+ /*
+ * No PRI support with the global identity domain. No need to enable or
+ * disable PRI in this path as the iommu has been put in the blocking
+ * state.
+ */
if (sm_supported(iommu))
ret = intel_pasid_setup_pass_through(iommu, dev, IOMMU_NO_PASID);
else
ret = device_setup_pass_through(dev);
+ if (!ret)
+ info->domain_attached = true;
+
return ret;
}
@@ -4371,10 +4327,16 @@ static int identity_domain_set_dev_pasid(struct iommu_domain *domain,
if (!pasid_supported(iommu) || dev_is_real_dma_subdevice(dev))
return -EOPNOTSUPP;
- ret = domain_setup_passthrough(iommu, dev, pasid, old);
+ ret = iopf_for_domain_replace(domain, old, dev);
if (ret)
return ret;
+ ret = domain_setup_passthrough(iommu, dev, pasid, old);
+ if (ret) {
+ iopf_for_domain_replace(old, domain, dev);
+ return ret;
+ }
+
domain_remove_dev_pasid(old, dev, pasid);
return 0;
}
@@ -4401,8 +4363,6 @@ const struct iommu_ops intel_iommu_ops = {
.release_device = intel_iommu_release_device,
.get_resv_regions = intel_iommu_get_resv_regions,
.device_group = intel_iommu_device_group,
- .dev_enable_feat = intel_iommu_dev_enable_feat,
- .dev_disable_feat = intel_iommu_dev_disable_feat,
.is_attach_deferred = intel_iommu_is_attach_deferred,
.def_domain_type = device_def_domain_type,
.pgsize_bitmap = SZ_4K,
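
The conversion above replaces the per-IOMMU domain_ids bitmap with a struct ida plus a dedicated mutex, which drops the NUM_RESERVED_DID bookkeeping and lets allocation sleep with GFP_KERNEL. A minimal sketch of the same allocate/free pattern, assuming a hypothetical my_iommu with ndoms hardware domain IDs and IDs 0 and 1 reserved as in the patch:

	#include <linux/idr.h>
	#include <linux/mutex.h>
	#include <linux/cleanup.h>
	#include <linux/types.h>

	struct my_iommu {
		struct mutex did_lock;	/* serialises domain-ID allocation */
		struct ida domain_ida;	/* init with ida_init(), free with ida_destroy() */
		u32 ndoms;
	};

	static int my_domain_id_alloc(struct my_iommu *iommu)
	{
		guard(mutex)(&iommu->did_lock);

		/* First usable ID is 2 (IDA_START_DID in the patch). */
		return ida_alloc_range(&iommu->domain_ida, 2, iommu->ndoms - 1,
				       GFP_KERNEL);
	}

	static void my_domain_id_free(struct my_iommu *iommu, int did)
	{
		guard(mutex)(&iommu->did_lock);
		ida_free(&iommu->domain_ida, did);
	}

ida_alloc_range() returns the allocated ID or a negative errno, which is why the error path above can just propagate the return value where the bitmap version had to compare against cap_ndoms().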
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index c4916886da5a..3ddbcc603de2 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -493,14 +493,13 @@ struct q_inval {
/* Page Request Queue depth */
#define PRQ_ORDER 4
-#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x20)
-#define PRQ_DEPTH ((0x1000 << PRQ_ORDER) >> 5)
+#define PRQ_SIZE (SZ_4K << PRQ_ORDER)
+#define PRQ_RING_MASK (PRQ_SIZE - 0x20)
+#define PRQ_DEPTH (PRQ_SIZE >> 5)
struct dmar_pci_notify_info;
#ifdef CONFIG_IRQ_REMAP
-/* 1MB - maximum possible interrupt remapping table size */
-#define INTR_REMAP_PAGE_ORDER 8
#define INTR_REMAP_TABLE_REG_SIZE 0xf
#define INTR_REMAP_TABLE_REG_SIZE_MASK 0xf
@@ -722,7 +721,9 @@ struct intel_iommu {
unsigned char name[16]; /* Device Name */
#ifdef CONFIG_INTEL_IOMMU
- unsigned long *domain_ids; /* bitmap of domains */
+ /* mutex to protect domain_ida */
+ struct mutex did_lock;
+ struct ida domain_ida; /* domain id allocator */
unsigned long *copied_tables; /* bitmap of copied tables */
spinlock_t lock; /* protect context, domain ids */
struct root_entry *root_entry; /* virtual address */
@@ -773,6 +774,7 @@ struct device_domain_info {
u8 ats_supported:1;
u8 ats_enabled:1;
u8 dtlb_extra_inval:1; /* Quirk for devices need extra flush */
+ u8 domain_attached:1; /* Device has domain attached */
u8 ats_qdep;
unsigned int iopf_refcount;
struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
@@ -809,11 +811,22 @@ static inline struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
}
/*
- * Domain ID reserved for pasid entries programmed for first-level
- * only and pass-through transfer modes.
+ * Domain ID 0 and 1 are reserved:
+ *
+ * If Caching mode is set, then invalid translations are tagged
+ * with domain-id 0, hence we need to pre-allocate it. We also
+ * use domain-id 0 as a marker for non-allocated domain-id, so
+ * make sure it is not used for a real domain.
+ *
+ * Vt-d spec rev3.0 (section 6.2.3.1) requires that each pasid
+ * entry for first-level or pass-through translation modes should
+ * be programmed with a domain id different from those used for
+ * second-level or nested translation. We reserve a domain id for
+ * this purpose. This domain id is also used for identity domain
+ * in legacy mode.
*/
#define FLPT_DEFAULT_DID 1
-#define NUM_RESERVED_DID 2
+#define IDA_START_DID 2
/* Retrieve the domain ID which has allocated to the domain */
static inline u16
@@ -1298,6 +1311,39 @@ void intel_iommu_drain_pasid_prq(struct device *dev, u32 pasid);
int intel_iommu_enable_iopf(struct device *dev);
void intel_iommu_disable_iopf(struct device *dev);
+static inline int iopf_for_domain_set(struct iommu_domain *domain,
+ struct device *dev)
+{
+ if (!domain || !domain->iopf_handler)
+ return 0;
+
+ return intel_iommu_enable_iopf(dev);
+}
+
+static inline void iopf_for_domain_remove(struct iommu_domain *domain,
+ struct device *dev)
+{
+ if (!domain || !domain->iopf_handler)
+ return;
+
+ intel_iommu_disable_iopf(dev);
+}
+
+static inline int iopf_for_domain_replace(struct iommu_domain *new,
+ struct iommu_domain *old,
+ struct device *dev)
+{
+ int ret;
+
+ ret = iopf_for_domain_set(new, dev);
+ if (ret)
+ return ret;
+
+ iopf_for_domain_remove(old, dev);
+
+ return 0;
+}
+
#ifdef CONFIG_INTEL_IOMMU_SVM
void intel_svm_check(struct intel_iommu *iommu);
struct iommu_domain *intel_svm_domain_alloc(struct device *dev,
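
The iopf_for_domain_{set,remove,replace}() helpers above centralise enabling and disabling of I/O page faults around the attach paths; replace takes the new domain's IOPF reference before dropping the old one so no fault window opens. A minimal sketch of how an attach path uses them, with the error unwinding mirrored from the set_dev_pasid hunks in this patch (my_hw_program() is a hypothetical stand-in for the PASID table programming):

	#include <linux/iommu.h>
	#include <linux/device.h>

	/* Hypothetical hardware programming step. */
	int my_hw_program(struct iommu_domain *domain, struct device *dev);

	static int my_set_dev_domain(struct iommu_domain *new,
				     struct iommu_domain *old, struct device *dev)
	{
		int ret;

		/* Take an IOPF reference for the new domain before touching HW. */
		ret = iopf_for_domain_replace(new, old, dev);
		if (ret)
			return ret;

		ret = my_hw_program(new, dev);
		if (ret)
			/* Swap the IOPF references back on failure. */
			iopf_for_domain_replace(old, new, dev);

		return ret;
	}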
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index 3bc2a03cceca..cf7b6882ec75 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -530,11 +530,11 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
if (!ir_table)
return -ENOMEM;
- ir_table_base = iommu_alloc_pages_node(iommu->node, GFP_KERNEL,
- INTR_REMAP_PAGE_ORDER);
+ /* 1MB - maximum possible interrupt remapping table size */
+ ir_table_base =
+ iommu_alloc_pages_node_sz(iommu->node, GFP_KERNEL, SZ_1M);
if (!ir_table_base) {
- pr_err("IR%d: failed to allocate pages of order %d\n",
- iommu->seq_id, INTR_REMAP_PAGE_ORDER);
+ pr_err("IR%d: failed to allocate 1M of pages\n", iommu->seq_id);
goto out_free_table;
}
@@ -612,7 +612,7 @@ out_free_fwnode:
out_free_bitmap:
bitmap_free(bitmap);
out_free_pages:
- iommu_free_pages(ir_table_base, INTR_REMAP_PAGE_ORDER);
+ iommu_free_pages(ir_table_base);
out_free_table:
kfree(ir_table);
@@ -633,7 +633,7 @@ static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
irq_domain_free_fwnode(fn);
iommu->ir_domain = NULL;
}
- iommu_free_pages(iommu->ir_table->base, INTR_REMAP_PAGE_ORDER);
+ iommu_free_pages(iommu->ir_table->base);
bitmap_free(iommu->ir_table->bitmap);
kfree(iommu->ir_table);
iommu->ir_table = NULL;
diff --git a/drivers/iommu/intel/nested.c b/drivers/iommu/intel/nested.c
index 6ac5c534bef4..fc312f649f9e 100644
--- a/drivers/iommu/intel/nested.c
+++ b/drivers/iommu/intel/nested.c
@@ -27,8 +27,7 @@ static int intel_nested_attach_dev(struct iommu_domain *domain,
unsigned long flags;
int ret = 0;
- if (info->domain)
- device_block_translation(dev);
+ device_block_translation(dev);
if (iommu->agaw < dmar_domain->s2_domain->agaw) {
dev_err_ratelimited(dev, "Adjusted guest address width not compatible\n");
@@ -56,17 +55,24 @@ static int intel_nested_attach_dev(struct iommu_domain *domain,
if (ret)
goto detach_iommu;
+ ret = iopf_for_domain_set(domain, dev);
+ if (ret)
+ goto unassign_tag;
+
ret = intel_pasid_setup_nested(iommu, dev,
IOMMU_NO_PASID, dmar_domain);
if (ret)
- goto unassign_tag;
+ goto disable_iopf;
info->domain = dmar_domain;
+ info->domain_attached = true;
spin_lock_irqsave(&dmar_domain->lock, flags);
list_add(&info->link, &dmar_domain->devices);
spin_unlock_irqrestore(&dmar_domain->lock, flags);
return 0;
+disable_iopf:
+ iopf_for_domain_remove(domain, dev);
unassign_tag:
cache_tag_unassign_domain(dmar_domain, dev, IOMMU_NO_PASID);
detach_iommu:
@@ -166,14 +172,20 @@ static int intel_nested_set_dev_pasid(struct iommu_domain *domain,
if (IS_ERR(dev_pasid))
return PTR_ERR(dev_pasid);
- ret = domain_setup_nested(iommu, dmar_domain, dev, pasid, old);
+ ret = iopf_for_domain_replace(domain, old, dev);
if (ret)
goto out_remove_dev_pasid;
+ ret = domain_setup_nested(iommu, dmar_domain, dev, pasid, old);
+ if (ret)
+ goto out_unwind_iopf;
+
domain_remove_dev_pasid(old, dev, pasid);
return 0;
+out_unwind_iopf:
+ iopf_for_domain_replace(old, domain, dev);
out_remove_dev_pasid:
domain_remove_dev_pasid(domain, dev, pasid);
return ret;
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index 7ee18bb48bd4..ac67a056b6c8 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -60,14 +60,14 @@ int intel_pasid_alloc_table(struct device *dev)
size = max_pasid >> (PASID_PDE_SHIFT - 3);
order = size ? get_order(size) : 0;
- dir = iommu_alloc_pages_node(info->iommu->node, GFP_KERNEL, order);
+ dir = iommu_alloc_pages_node_sz(info->iommu->node, GFP_KERNEL,
+ 1 << (order + PAGE_SHIFT));
if (!dir) {
kfree(pasid_table);
return -ENOMEM;
}
pasid_table->table = dir;
- pasid_table->order = order;
pasid_table->max_pasid = 1 << (order + PAGE_SHIFT + 3);
info->pasid_table = pasid_table;
@@ -97,10 +97,10 @@ void intel_pasid_free_table(struct device *dev)
max_pde = pasid_table->max_pasid >> PASID_PDE_SHIFT;
for (i = 0; i < max_pde; i++) {
table = get_pasid_table_from_pde(&dir[i]);
- iommu_free_page(table);
+ iommu_free_pages(table);
}
- iommu_free_pages(pasid_table->table, pasid_table->order);
+ iommu_free_pages(pasid_table->table);
kfree(pasid_table);
}
@@ -148,7 +148,8 @@ retry:
if (!entries) {
u64 tmp;
- entries = iommu_alloc_page_node(info->iommu->node, GFP_ATOMIC);
+ entries = iommu_alloc_pages_node_sz(info->iommu->node,
+ GFP_ATOMIC, SZ_4K);
if (!entries)
return NULL;
@@ -161,7 +162,7 @@ retry:
tmp = 0ULL;
if (!try_cmpxchg64(&dir[dir_index].val, &tmp,
(u64)virt_to_phys(entries) | PASID_PTE_PRESENT)) {
- iommu_free_page(entries);
+ iommu_free_pages(entries);
goto retry;
}
if (!ecap_coherent(info->iommu->ecap)) {
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index 668d8ece6b14..fd0fd1a0df84 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -47,7 +47,6 @@ struct pasid_entry {
/* The representative of a PASID table */
struct pasid_table {
void *table; /* pasid table pointer */
- int order; /* page order of pasid table */
u32 max_pasid; /* max pasid */
};
diff --git a/drivers/iommu/intel/prq.c b/drivers/iommu/intel/prq.c
index 5b6a64d96850..52570e42a14c 100644
--- a/drivers/iommu/intel/prq.c
+++ b/drivers/iommu/intel/prq.c
@@ -290,7 +290,8 @@ int intel_iommu_enable_prq(struct intel_iommu *iommu)
struct iopf_queue *iopfq;
int irq, ret;
- iommu->prq = iommu_alloc_pages_node(iommu->node, GFP_KERNEL, PRQ_ORDER);
+ iommu->prq =
+ iommu_alloc_pages_node_sz(iommu->node, GFP_KERNEL, PRQ_SIZE);
if (!iommu->prq) {
pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
iommu->name);
@@ -340,7 +341,7 @@ free_hwirq:
dmar_free_hwirq(irq);
iommu->pr_irq = 0;
free_prq:
- iommu_free_pages(iommu->prq, PRQ_ORDER);
+ iommu_free_pages(iommu->prq);
iommu->prq = NULL;
return ret;
@@ -363,7 +364,7 @@ int intel_iommu_finish_prq(struct intel_iommu *iommu)
iommu->iopf_queue = NULL;
}
- iommu_free_pages(iommu->prq, PRQ_ORDER);
+ iommu_free_pages(iommu->prq);
iommu->prq = NULL;
return 0;
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index ba93123cb4eb..f3da596410b5 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -164,18 +164,23 @@ static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
if (IS_ERR(dev_pasid))
return PTR_ERR(dev_pasid);
+ ret = iopf_for_domain_replace(domain, old, dev);
+ if (ret)
+ goto out_remove_dev_pasid;
+
/* Setup the pasid table: */
sflags = cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0;
ret = __domain_setup_first_level(iommu, dev, pasid,
FLPT_DEFAULT_DID, mm->pgd,
sflags, old);
if (ret)
- goto out_remove_dev_pasid;
+ goto out_unwind_iopf;
domain_remove_dev_pasid(old, dev, pasid);
return 0;
-
+out_unwind_iopf:
+ iopf_for_domain_replace(old, domain, dev);
out_remove_dev_pasid:
domain_remove_dev_pasid(domain, dev, pasid);
return ret;
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 7632c80edea6..96425e92f313 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -13,6 +13,7 @@
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
+#include <linux/device/faux.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -251,8 +252,6 @@ static inline bool arm_lpae_concat_mandatory(struct io_pgtable_cfg *cfg,
(data->start_level == 1) && (oas == 40);
}
-static bool selftest_running = false;
-
static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
return (dma_addr_t)virt_to_phys(pages);
@@ -263,16 +262,20 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
void *cookie)
{
struct device *dev = cfg->iommu_dev;
- int order = get_order(size);
+ size_t alloc_size;
dma_addr_t dma;
void *pages;
- VM_BUG_ON((gfp & __GFP_HIGHMEM));
-
+ /*
+ * For very small starting-level translation tables the HW requires a
+ * minimum alignment of at least 64 to cover all cases.
+ */
+ alloc_size = max(size, 64);
if (cfg->alloc)
- pages = cfg->alloc(cookie, size, gfp);
+ pages = cfg->alloc(cookie, alloc_size, gfp);
else
- pages = iommu_alloc_pages_node(dev_to_node(dev), gfp, order);
+ pages = iommu_alloc_pages_node_sz(dev_to_node(dev), gfp,
+ alloc_size);
if (!pages)
return NULL;
@@ -300,7 +303,7 @@ out_free:
if (cfg->free)
cfg->free(cookie, pages, size);
else
- iommu_free_pages(pages, order);
+ iommu_free_pages(pages);
return NULL;
}
@@ -316,7 +319,7 @@ static void __arm_lpae_free_pages(void *pages, size_t size,
if (cfg->free)
cfg->free(cookie, pages, size);
else
- iommu_free_pages(pages, get_order(size));
+ iommu_free_pages(pages);
}
static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
@@ -371,7 +374,7 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
for (i = 0; i < num_entries; i++)
if (iopte_leaf(ptep[i], lvl, data->iop.fmt)) {
/* We require an unmap first */
- WARN_ON(!selftest_running);
+ WARN_ON(!(data->iop.cfg.quirks & IO_PGTABLE_QUIRK_NO_WARN));
return -EEXIST;
} else if (iopte_type(ptep[i]) == ARM_LPAE_PTE_TYPE_TABLE) {
/*
@@ -473,7 +476,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
cptep = iopte_deref(pte, data);
} else if (pte) {
/* We require an unmap first */
- WARN_ON(!selftest_running);
+ WARN_ON(!(cfg->quirks & IO_PGTABLE_QUIRK_NO_WARN));
return -EEXIST;
}
@@ -641,8 +644,10 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
ptep += unmap_idx_start;
pte = READ_ONCE(*ptep);
- if (WARN_ON(!pte))
- return 0;
+ if (!pte) {
+ WARN_ON(!(data->iop.cfg.quirks & IO_PGTABLE_QUIRK_NO_WARN));
+ return -ENOENT;
+ }
/* If the size matches this level, we're in the right place */
if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
@@ -652,8 +657,10 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
/* Find and handle non-leaf entries */
for (i = 0; i < num_entries; i++) {
pte = READ_ONCE(ptep[i]);
- if (WARN_ON(!pte))
+ if (!pte) {
+ WARN_ON(!(data->iop.cfg.quirks & IO_PGTABLE_QUIRK_NO_WARN));
break;
+ }
if (!iopte_leaf(pte, lvl, iop->fmt)) {
__arm_lpae_clear_pte(&ptep[i], &iop->cfg, 1);
@@ -968,7 +975,8 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
IO_PGTABLE_QUIRK_ARM_TTBR1 |
IO_PGTABLE_QUIRK_ARM_OUTER_WBWA |
- IO_PGTABLE_QUIRK_ARM_HD))
+ IO_PGTABLE_QUIRK_ARM_HD |
+ IO_PGTABLE_QUIRK_NO_WARN))
return NULL;
data = arm_lpae_alloc_pgtable(cfg);
@@ -1069,7 +1077,8 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
struct arm_lpae_io_pgtable *data;
typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;
- if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_S2FWB))
+ if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_S2FWB |
+ IO_PGTABLE_QUIRK_NO_WARN))
return NULL;
data = arm_lpae_alloc_pgtable(cfg);
@@ -1310,7 +1319,6 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
#define __FAIL(ops, i) ({ \
WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \
arm_lpae_dump_ops(ops); \
- selftest_running = false; \
-EFAULT; \
})
@@ -1326,8 +1334,6 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
size_t size, mapped;
struct io_pgtable_ops *ops;
- selftest_running = true;
-
for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
cfg_cookie = cfg;
ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
@@ -1416,7 +1422,6 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
free_io_pgtable_ops(ops);
}
- selftest_running = false;
return 0;
}
@@ -1433,15 +1438,18 @@ static int __init arm_lpae_do_selftests(void)
};
int i, j, k, pass = 0, fail = 0;
- struct device dev;
+ struct faux_device *dev;
struct io_pgtable_cfg cfg = {
.tlb = &dummy_tlb_ops,
.coherent_walk = true,
- .iommu_dev = &dev,
+ .quirks = IO_PGTABLE_QUIRK_NO_WARN,
};
- /* __arm_lpae_alloc_pages() merely needs dev_to_node() to work */
- set_dev_node(&dev, NUMA_NO_NODE);
+ dev = faux_device_create("io-pgtable-test", NULL, 0);
+ if (!dev)
+ return -ENOMEM;
+
+ cfg.iommu_dev = &dev->dev;
for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
for (j = 0; j < ARRAY_SIZE(address_size); ++j) {
@@ -1461,6 +1469,8 @@ static int __init arm_lpae_do_selftests(void)
}
pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
+ faux_device_destroy(dev);
+
return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
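
With the selftest_running global gone, a caller that intentionally provokes map/unmap conflicts opts out of the driver's WARN_ON noise by setting the new IO_PGTABLE_QUIRK_NO_WARN quirk in its io_pgtable_cfg, as the selftest above now does. A minimal sketch of allocating page-table ops with that quirk; the page sizes, address widths and format are illustrative only:

	#include <linux/io-pgtable.h>
	#include <linux/sizes.h>

	static struct io_pgtable_ops *
	my_alloc_quiet_pgtable(struct device *dev, const struct iommu_flush_ops *tlb,
			       void *cookie)
	{
		struct io_pgtable_cfg cfg = {
			.quirks		= IO_PGTABLE_QUIRK_NO_WARN, /* expected -EEXIST/-ENOENT, stay quiet */
			.pgsize_bitmap	= SZ_4K | SZ_2M,
			.ias		= 48,
			.oas		= 48,
			.coherent_walk	= true,
			.tlb		= tlb,
			.iommu_dev	= dev,
		};

		return alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
	}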
diff --git a/drivers/iommu/io-pgtable-dart.c b/drivers/iommu/io-pgtable-dart.c
index 06aca9ab52f9..679bda104797 100644
--- a/drivers/iommu/io-pgtable-dart.c
+++ b/drivers/iommu/io-pgtable-dart.c
@@ -107,14 +107,6 @@ static phys_addr_t iopte_to_paddr(dart_iopte pte,
return paddr;
}
-static void *__dart_alloc_pages(size_t size, gfp_t gfp)
-{
- int order = get_order(size);
-
- VM_BUG_ON((gfp & __GFP_HIGHMEM));
- return iommu_alloc_pages(gfp, order);
-}
-
static int dart_init_pte(struct dart_io_pgtable *data,
unsigned long iova, phys_addr_t paddr,
dart_iopte prot, int num_entries,
@@ -256,13 +248,13 @@ static int dart_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
/* no L2 table present */
if (!pte) {
- cptep = __dart_alloc_pages(tblsz, gfp);
+ cptep = iommu_alloc_pages_sz(gfp, tblsz);
if (!cptep)
return -ENOMEM;
pte = dart_install_table(cptep, ptep, 0, data);
if (pte)
- iommu_free_pages(cptep, get_order(tblsz));
+ iommu_free_pages(cptep);
/* L2 table is present (now) */
pte = READ_ONCE(*ptep);
@@ -413,7 +405,8 @@ apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
cfg->apple_dart_cfg.n_ttbrs = 1 << data->tbl_bits;
for (i = 0; i < cfg->apple_dart_cfg.n_ttbrs; ++i) {
- data->pgd[i] = __dart_alloc_pages(DART_GRANULE(data), GFP_KERNEL);
+ data->pgd[i] =
+ iommu_alloc_pages_sz(GFP_KERNEL, DART_GRANULE(data));
if (!data->pgd[i])
goto out_free_data;
cfg->apple_dart_cfg.ttbr[i] = virt_to_phys(data->pgd[i]);
@@ -423,8 +416,7 @@ apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
out_free_data:
while (--i >= 0) {
- iommu_free_pages(data->pgd[i],
- get_order(DART_GRANULE(data)));
+ iommu_free_pages(data->pgd[i]);
}
kfree(data);
return NULL;
@@ -433,7 +425,6 @@ out_free_data:
static void apple_dart_free_pgtable(struct io_pgtable *iop)
{
struct dart_io_pgtable *data = io_pgtable_to_data(iop);
- int order = get_order(DART_GRANULE(data));
dart_iopte *ptep, *end;
int i;
@@ -445,9 +436,9 @@ static void apple_dart_free_pgtable(struct io_pgtable *iop)
dart_iopte pte = *ptep++;
if (pte)
- iommu_free_pages(iopte_deref(pte, data), order);
+ iommu_free_pages(iopte_deref(pte, data));
}
- iommu_free_pages(data->pgd[i], order);
+ iommu_free_pages(data->pgd[i]);
}
kfree(data);
diff --git a/drivers/iommu/iommu-pages.c b/drivers/iommu/iommu-pages.c
new file mode 100644
index 000000000000..238c09e5166b
--- /dev/null
+++ b/drivers/iommu/iommu-pages.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Google LLC.
+ * Pasha Tatashin <pasha.tatashin@soleen.com>
+ */
+#include "iommu-pages.h"
+#include <linux/gfp.h>
+#include <linux/mm.h>
+
+#define IOPTDESC_MATCH(pg_elm, elm) \
+ static_assert(offsetof(struct page, pg_elm) == \
+ offsetof(struct ioptdesc, elm))
+IOPTDESC_MATCH(flags, __page_flags);
+IOPTDESC_MATCH(lru, iopt_freelist_elm); /* Ensure bit 0 is clear */
+IOPTDESC_MATCH(mapping, __page_mapping);
+IOPTDESC_MATCH(private, _private);
+IOPTDESC_MATCH(page_type, __page_type);
+IOPTDESC_MATCH(_refcount, __page_refcount);
+#ifdef CONFIG_MEMCG
+IOPTDESC_MATCH(memcg_data, memcg_data);
+#endif
+#undef IOPTDESC_MATCH
+static_assert(sizeof(struct ioptdesc) <= sizeof(struct page));
+
+/**
+ * iommu_alloc_pages_node_sz - Allocate a zeroed page of a given size from
+ * specific NUMA node
+ * @nid: memory NUMA node id
+ * @gfp: buddy allocator flags
+ * @size: Memory size to allocate, rounded up to a power of 2
+ *
+ * Returns the virtual address of the allocated page. The page must be freed
+ * either by calling iommu_free_pages() or via iommu_put_pages_list(). The
+ * returned allocation is roundup_pow_of_two(size) bytes and is physically aligned
+ * to its size.
+ */
+void *iommu_alloc_pages_node_sz(int nid, gfp_t gfp, size_t size)
+{
+ unsigned long pgcnt;
+ struct folio *folio;
+ unsigned int order;
+
+ /* This uses page_address() on the memory. */
+ if (WARN_ON(gfp & __GFP_HIGHMEM))
+ return NULL;
+
+ /*
+ * Currently sub page allocations result in a full page being returned.
+ */
+ order = get_order(size);
+
+ /*
+ * __folio_alloc_node() does not handle NUMA_NO_NODE like
+ * alloc_pages_node() did.
+ */
+ if (nid == NUMA_NO_NODE)
+ nid = numa_mem_id();
+
+ folio = __folio_alloc_node(gfp | __GFP_ZERO, order, nid);
+ if (unlikely(!folio))
+ return NULL;
+
+ /*
+	 * All page allocations that should be reported as "iommu-pagetables"
+ * to userspace must use one of the functions below. This includes
+ * allocations of page-tables and other per-iommu_domain configuration
+ * structures.
+ *
+ * This is necessary for the proper accounting as IOMMU state can be
+ * rather large, i.e. multiple gigabytes in size.
+ */
+ pgcnt = 1UL << order;
+ mod_node_page_state(folio_pgdat(folio), NR_IOMMU_PAGES, pgcnt);
+ lruvec_stat_mod_folio(folio, NR_SECONDARY_PAGETABLE, pgcnt);
+
+ return folio_address(folio);
+}
+EXPORT_SYMBOL_GPL(iommu_alloc_pages_node_sz);
+
+static void __iommu_free_desc(struct ioptdesc *iopt)
+{
+ struct folio *folio = ioptdesc_folio(iopt);
+ const unsigned long pgcnt = 1UL << folio_order(folio);
+
+ mod_node_page_state(folio_pgdat(folio), NR_IOMMU_PAGES, -pgcnt);
+ lruvec_stat_mod_folio(folio, NR_SECONDARY_PAGETABLE, -pgcnt);
+ folio_put(folio);
+}
+
+/**
+ * iommu_free_pages - free pages
+ * @virt: virtual address of the page to be freed.
+ *
+ * The page must have been allocated by iommu_alloc_pages_node_sz()
+ */
+void iommu_free_pages(void *virt)
+{
+ if (!virt)
+ return;
+ __iommu_free_desc(virt_to_ioptdesc(virt));
+}
+EXPORT_SYMBOL_GPL(iommu_free_pages);
+
+/**
+ * iommu_put_pages_list - free a list of pages.
+ * @list: The list of pages to be freed
+ *
+ * Frees a list of pages allocated by iommu_alloc_pages_node_sz(). On return the
+ * passed list is invalid; the caller must use IOMMU_PAGES_LIST_INIT to reinit
+ * the list if it expects to use it again.
+ */
+void iommu_put_pages_list(struct iommu_pages_list *list)
+{
+ struct ioptdesc *iopt, *tmp;
+
+ list_for_each_entry_safe(iopt, tmp, &list->pages, iopt_freelist_elm)
+ __iommu_free_desc(iopt);
+}
+EXPORT_SYMBOL_GPL(iommu_put_pages_list);
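
As a usage sketch only (not part of the patch): a hypothetical page-table allocator built on the new size-based interface. The example_* names, the 4 KiB table size and the GFP flags are assumptions for illustration.

#include <linux/gfp.h>
#include <linux/sizes.h>
#include "iommu-pages.h"

/* Allocate one zeroed 4 KiB table page near the IOMMU's NUMA node. The
 * allocation is rounded up to a power of two, naturally aligned, and is
 * accounted under NR_IOMMU_PAGES / NR_SECONDARY_PAGETABLE. */
static u64 *example_alloc_table(int nid)
{
	return iommu_alloc_pages_node_sz(nid, GFP_KERNEL, SZ_4K);
}

static void example_free_table(u64 *table)
{
	/* NULL is tolerated; the accounting is undone before the folio is put. */
	iommu_free_pages(table);
}
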
diff --git a/drivers/iommu/iommu-pages.h b/drivers/iommu/iommu-pages.h
index 82ebf0033081..b3af2813ed0c 100644
--- a/drivers/iommu/iommu-pages.h
+++ b/drivers/iommu/iommu-pages.h
@@ -7,180 +7,95 @@
#ifndef __IOMMU_PAGES_H
#define __IOMMU_PAGES_H
-#include <linux/vmstat.h>
-#include <linux/gfp.h>
-#include <linux/mm.h>
-
-/*
- * All page allocations that should be reported to as "iommu-pagetables" to
- * userspace must use one of the functions below. This includes allocations of
- * page-tables and other per-iommu_domain configuration structures.
- *
- * This is necessary for the proper accounting as IOMMU state can be rather
- * large, i.e. multiple gigabytes in size.
- */
-
-/**
- * __iommu_alloc_account - account for newly allocated page.
- * @page: head struct page of the page.
- * @order: order of the page
- */
-static inline void __iommu_alloc_account(struct page *page, int order)
-{
- const long pgcnt = 1l << order;
-
- mod_node_page_state(page_pgdat(page), NR_IOMMU_PAGES, pgcnt);
- mod_lruvec_page_state(page, NR_SECONDARY_PAGETABLE, pgcnt);
-}
-
-/**
- * __iommu_free_account - account a page that is about to be freed.
- * @page: head struct page of the page.
- * @order: order of the page
- */
-static inline void __iommu_free_account(struct page *page, int order)
-{
- const long pgcnt = 1l << order;
-
- mod_node_page_state(page_pgdat(page), NR_IOMMU_PAGES, -pgcnt);
- mod_lruvec_page_state(page, NR_SECONDARY_PAGETABLE, -pgcnt);
-}
+#include <linux/iommu.h>
/**
- * __iommu_alloc_pages - allocate a zeroed page of a given order.
- * @gfp: buddy allocator flags
- * @order: page order
+ * struct ioptdesc - Memory descriptor for IOMMU page tables
+ * @iopt_freelist_elm: List element for a struct iommu_pages_list
*
- * returns the head struct page of the allocated page.
+ * This struct overlays struct page for now. Do not modify without a good
+ * understanding of the issues.
*/
-static inline struct page *__iommu_alloc_pages(gfp_t gfp, int order)
+struct ioptdesc {
+ unsigned long __page_flags;
+
+ struct list_head iopt_freelist_elm;
+ unsigned long __page_mapping;
+ pgoff_t __index;
+ void *_private;
+
+ unsigned int __page_type;
+ atomic_t __page_refcount;
+#ifdef CONFIG_MEMCG
+ unsigned long memcg_data;
+#endif
+};
+
+static inline struct ioptdesc *folio_ioptdesc(struct folio *folio)
{
- struct page *page;
-
- page = alloc_pages(gfp | __GFP_ZERO, order);
- if (unlikely(!page))
- return NULL;
-
- __iommu_alloc_account(page, order);
-
- return page;
+ return (struct ioptdesc *)folio;
}
-/**
- * __iommu_free_pages - free page of a given order
- * @page: head struct page of the page
- * @order: page order
- */
-static inline void __iommu_free_pages(struct page *page, int order)
+static inline struct folio *ioptdesc_folio(struct ioptdesc *iopt)
{
- if (!page)
- return;
-
- __iommu_free_account(page, order);
- __free_pages(page, order);
+ return (struct folio *)iopt;
}
-/**
- * iommu_alloc_pages_node - allocate a zeroed page of a given order from
- * specific NUMA node.
- * @nid: memory NUMA node id
- * @gfp: buddy allocator flags
- * @order: page order
- *
- * returns the virtual address of the allocated page
- */
-static inline void *iommu_alloc_pages_node(int nid, gfp_t gfp, int order)
+static inline struct ioptdesc *virt_to_ioptdesc(void *virt)
{
- struct page *page = alloc_pages_node(nid, gfp | __GFP_ZERO, order);
-
- if (unlikely(!page))
- return NULL;
-
- __iommu_alloc_account(page, order);
-
- return page_address(page);
+ return folio_ioptdesc(virt_to_folio(virt));
}
-/**
- * iommu_alloc_pages - allocate a zeroed page of a given order
- * @gfp: buddy allocator flags
- * @order: page order
- *
- * returns the virtual address of the allocated page
- */
-static inline void *iommu_alloc_pages(gfp_t gfp, int order)
-{
- struct page *page = __iommu_alloc_pages(gfp, order);
-
- if (unlikely(!page))
- return NULL;
-
- return page_address(page);
-}
+void *iommu_alloc_pages_node_sz(int nid, gfp_t gfp, size_t size);
+void iommu_free_pages(void *virt);
+void iommu_put_pages_list(struct iommu_pages_list *list);
/**
- * iommu_alloc_page_node - allocate a zeroed page at specific NUMA node.
- * @nid: memory NUMA node id
- * @gfp: buddy allocator flags
- *
- * returns the virtual address of the allocated page
+ * iommu_pages_list_add - add the page to an iommu_pages_list
+ * @list: List to add the page to
+ * @virt: Address returned from iommu_alloc_pages_node_sz()
*/
-static inline void *iommu_alloc_page_node(int nid, gfp_t gfp)
+static inline void iommu_pages_list_add(struct iommu_pages_list *list,
+ void *virt)
{
- return iommu_alloc_pages_node(nid, gfp, 0);
+ list_add_tail(&virt_to_ioptdesc(virt)->iopt_freelist_elm, &list->pages);
}
/**
- * iommu_alloc_page - allocate a zeroed page
- * @gfp: buddy allocator flags
+ * iommu_pages_list_splice - Move all the pages in @from onto @to
+ * @from: Source list of pages
+ * @to: Destination list of pages
*
- * returns the virtual address of the allocated page
+ * @from must be re-initialized after calling this function if it is to be
+ * used again.
*/
-static inline void *iommu_alloc_page(gfp_t gfp)
+static inline void iommu_pages_list_splice(struct iommu_pages_list *from,
+ struct iommu_pages_list *to)
{
- return iommu_alloc_pages(gfp, 0);
+ list_splice(&from->pages, &to->pages);
}
/**
- * iommu_free_pages - free page of a given order
- * @virt: virtual address of the page to be freed.
- * @order: page order
+ * iommu_pages_list_empty - True if the list is empty
+ * @list: List to check
*/
-static inline void iommu_free_pages(void *virt, int order)
+static inline bool iommu_pages_list_empty(struct iommu_pages_list *list)
{
- if (!virt)
- return;
-
- __iommu_free_pages(virt_to_page(virt), order);
+ return list_empty(&list->pages);
}
/**
- * iommu_free_page - free page
- * @virt: virtual address of the page to be freed.
- */
-static inline void iommu_free_page(void *virt)
-{
- iommu_free_pages(virt, 0);
-}
-
-/**
- * iommu_put_pages_list - free a list of pages.
- * @page: the head of the lru list to be freed.
+ * iommu_alloc_pages_sz - Allocate a zeroed page of a given size
+ * @gfp: buddy allocator flags
+ * @size: Memory size to allocate, rounded up to a power of 2
*
- * There are no locking requirement for these pages, as they are going to be
- * put on a free list as soon as refcount reaches 0. Pages are put on this LRU
- * list once they are removed from the IOMMU page tables. However, they can
- * still be access through debugfs.
+ * Returns the virtual address of the allocated page.
*/
-static inline void iommu_put_pages_list(struct list_head *page)
+static inline void *iommu_alloc_pages_sz(gfp_t gfp, size_t size)
{
- while (!list_empty(page)) {
- struct page *p = list_entry(page->prev, struct page, lru);
-
- list_del(&p->lru);
- __iommu_free_account(p, 0);
- put_page(p);
- }
+ return iommu_alloc_pages_node_sz(NUMA_NO_NODE, gfp, size);
}
#endif /* __IOMMU_PAGES_H */
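
A minimal sketch of the freelist pattern these helpers support, assuming struct iommu_pages_list and IOMMU_PAGES_LIST_INIT are provided by <linux/iommu.h> as the new include suggests; the example_* function is invented for illustration.

#include <linux/iommu.h>
#include "iommu-pages.h"

/* Collect table pages that were just unhooked from a live page table and
 * free them in one batch once the IOTLB has been invalidated. */
static void example_gather_and_free(void *pt_a, void *pt_b)
{
	struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist);

	iommu_pages_list_add(&freelist, pt_a);
	iommu_pages_list_add(&freelist, pt_b);

	if (!iommu_pages_list_empty(&freelist)) {
		/* ... flush the IOTLB here, then release the memory ... */
		iommu_put_pages_list(&freelist);
	}
}
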
diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c
index ab18bc494eef..1a51cfd82808 100644
--- a/drivers/iommu/iommu-sva.c
+++ b/drivers/iommu/iommu-sva.c
@@ -63,9 +63,6 @@ static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct de
* reference is taken. Caller must call iommu_sva_unbind_device()
* to release each reference.
*
- * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
- * initialize the required SVA features.
- *
* On error, returns an ERR_PTR value.
*/
struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
@@ -299,15 +296,12 @@ static struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
const struct iommu_ops *ops = dev_iommu_ops(dev);
struct iommu_domain *domain;
- if (ops->domain_alloc_sva) {
- domain = ops->domain_alloc_sva(dev, mm);
- if (IS_ERR(domain))
- return domain;
- } else {
- domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
- if (!domain)
- return ERR_PTR(-ENOMEM);
- }
+ if (!ops->domain_alloc_sva)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ domain = ops->domain_alloc_sva(dev, mm);
+ if (IS_ERR(domain))
+ return domain;
domain->type = IOMMU_DOMAIN_SVA;
domain->cookie_type = IOMMU_COOKIE_SVA;
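
With the ops->domain_alloc(IOMMU_DOMAIN_SVA) fallback removed, only drivers that implement domain_alloc_sva() can serve SVA binds. Below is a hedged sketch of such a callback; the mydrv_* names and the empty ops table are placeholders, not any real driver's method.

#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/slab.h>

struct mydrv_sva_domain {
	struct iommu_domain domain;
	/* driver-private SVA state (ASID, context descriptor, ...) goes here */
};

static const struct iommu_domain_ops mydrv_sva_ops = {
	/* a real driver supplies .set_dev_pasid and .free here */
};

static struct iommu_domain *mydrv_domain_alloc_sva(struct device *dev,
						   struct mm_struct *mm)
{
	struct mydrv_sva_domain *sva = kzalloc(sizeof(*sva), GFP_KERNEL);

	if (!sva)
		return ERR_PTR(-ENOMEM);
	sva->domain.ops = &mydrv_sva_ops;
	/* The core sets domain->type and domain->cookie_type on return. */
	return &sva->domain;
}
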
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 6c02f93422ce..a4b606c591da 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -277,6 +277,8 @@ int iommu_device_register(struct iommu_device *iommu,
err = bus_iommu_probe(iommu_buses[i]);
if (err)
iommu_device_unregister(iommu);
+ else
+ WRITE_ONCE(iommu->ready, true);
return err;
}
EXPORT_SYMBOL_GPL(iommu_device_register);
@@ -422,13 +424,15 @@ static int iommu_init_device(struct device *dev)
* is buried in the bus dma_configure path. Properly unpicking that is
* still a big job, so for now just invoke the whole thing. The device
* already having a driver bound means dma_configure has already run and
- * either found no IOMMU to wait for, or we're in its replay call right
- * now, so either way there's no point calling it again.
+ * found no IOMMU to wait for, so there's no point calling it again.
*/
- if (!dev->driver && dev->bus->dma_configure) {
+ if (!dev->iommu->fwspec && !dev->driver && dev->bus->dma_configure) {
mutex_unlock(&iommu_probe_device_lock);
dev->bus->dma_configure(dev);
mutex_lock(&iommu_probe_device_lock);
+ /* If another instance finished the job for us, skip it */
+ if (!dev->iommu || dev->iommu_group)
+ return -ENODEV;
}
/*
* At this point, relevant devices either now have a fwspec which will
@@ -1629,15 +1633,13 @@ static struct iommu_domain *__iommu_alloc_identity_domain(struct device *dev)
if (ops->identity_domain)
return ops->identity_domain;
- /* Older drivers create the identity domain via ops->domain_alloc() */
- if (!ops->domain_alloc)
+ if (ops->domain_alloc_identity) {
+ domain = ops->domain_alloc_identity(dev);
+ if (IS_ERR(domain))
+ return domain;
+ } else {
return ERR_PTR(-EOPNOTSUPP);
-
- domain = ops->domain_alloc(IOMMU_DOMAIN_IDENTITY);
- if (IS_ERR(domain))
- return domain;
- if (!domain)
- return ERR_PTR(-ENOMEM);
+ }
iommu_domain_init(domain, IOMMU_DOMAIN_IDENTITY, ops);
return domain;
@@ -2025,8 +2027,10 @@ __iommu_paging_domain_alloc_flags(struct device *dev, unsigned int type,
domain = ops->domain_alloc_paging(dev);
else if (ops->domain_alloc_paging_flags)
domain = ops->domain_alloc_paging_flags(dev, flags, NULL);
+#if IS_ENABLED(CONFIG_FSL_PAMU)
else if (ops->domain_alloc && !flags)
domain = ops->domain_alloc(IOMMU_DOMAIN_UNMANAGED);
+#endif
else
return ERR_PTR(-EOPNOTSUPP);
@@ -2204,6 +2208,19 @@ static void *iommu_make_pasid_array_entry(struct iommu_domain *domain,
return xa_tag_pointer(domain, IOMMU_PASID_ARRAY_DOMAIN);
}
+static bool domain_iommu_ops_compatible(const struct iommu_ops *ops,
+ struct iommu_domain *domain)
+{
+ if (domain->owner == ops)
+ return true;
+
+ /* For static domains, owner isn't set. */
+ if (domain == ops->blocked_domain || domain == ops->identity_domain)
+ return true;
+
+ return false;
+}
+
static int __iommu_attach_group(struct iommu_domain *domain,
struct iommu_group *group)
{
@@ -2214,7 +2231,8 @@ static int __iommu_attach_group(struct iommu_domain *domain,
return -EBUSY;
dev = iommu_group_first_dev(group);
- if (!dev_has_iommu(dev) || dev_iommu_ops(dev) != domain->owner)
+ if (!dev_has_iommu(dev) ||
+ !domain_iommu_ops_compatible(dev_iommu_ops(dev), domain))
return -EINVAL;
return __iommu_group_set_domain(group, domain);
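
The relaxed ownership test matters for the driver-provided static domains, whose ->owner is never filled in; parking a group on ops->blocked_domain, for example, now goes through the same path. A hypothetical call site:

/* Hypothetical helper: the static blocked domain has no ->owner, so the old
 * "dev_iommu_ops(dev) != domain->owner" check would have rejected it, while
 * domain_iommu_ops_compatible() accepts it. */
static int example_block_group(struct iommu_group *group,
			       const struct iommu_ops *ops)
{
	if (!ops->blocked_domain)
		return -EOPNOTSUPP;
	return iommu_attach_group(ops->blocked_domain, group);
}
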
@@ -2395,6 +2413,7 @@ static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
unsigned int pgsize_idx, pgsize_idx_next;
unsigned long pgsizes;
size_t offset, pgsize, pgsize_next;
+ size_t offset_end;
unsigned long addr_merge = paddr | iova;
/* Page sizes supported by the hardware and small enough for @size */
@@ -2435,7 +2454,8 @@ static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
* If size is big enough to accommodate the larger page, reduce
* the number of smaller pages.
*/
- if (offset + pgsize_next <= size)
+ if (!check_add_overflow(offset, pgsize_next, &offset_end) &&
+ offset_end <= size)
size = offset;
out_set_count:
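
The extra check matters because offset and pgsize_next are both size_t and the candidate larger page can sit near the top of the address space, so the old "offset + pgsize_next <= size" could wrap to a small value and wrongly shrink the mapping. A standalone illustration of the guarded comparison; the example_* wrapper exists only for demonstration.

#include <linux/overflow.h>
#include <linux/types.h>

/* Use the larger page size only when offset + pgsize_next neither wraps
 * around nor exceeds the remaining size - the same test iommu_pgsize() now
 * performs via check_add_overflow(). */
static bool example_can_take_larger_page(size_t offset, size_t pgsize_next,
					 size_t size)
{
	size_t offset_end;

	if (check_add_overflow(offset, pgsize_next, &offset_end))
		return false;	/* the sum wrapped, cannot be trusted */
	return offset_end <= size;
}
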
@@ -2842,31 +2862,39 @@ bool iommu_default_passthrough(void)
}
EXPORT_SYMBOL_GPL(iommu_default_passthrough);
-const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode)
+static const struct iommu_device *iommu_from_fwnode(const struct fwnode_handle *fwnode)
{
- const struct iommu_ops *ops = NULL;
- struct iommu_device *iommu;
+ const struct iommu_device *iommu, *ret = NULL;
spin_lock(&iommu_device_lock);
list_for_each_entry(iommu, &iommu_device_list, list)
if (iommu->fwnode == fwnode) {
- ops = iommu->ops;
+ ret = iommu;
break;
}
spin_unlock(&iommu_device_lock);
- return ops;
+ return ret;
+}
+
+const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode)
+{
+ const struct iommu_device *iommu = iommu_from_fwnode(fwnode);
+
+ return iommu ? iommu->ops : NULL;
}
int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode)
{
- const struct iommu_ops *ops = iommu_ops_from_fwnode(iommu_fwnode);
+ const struct iommu_device *iommu = iommu_from_fwnode(iommu_fwnode);
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- if (!ops)
+ if (!iommu)
return driver_deferred_probe_check_state(dev);
+ if (!dev->iommu && !READ_ONCE(iommu->ready))
+ return -EPROBE_DEFER;
if (fwspec)
- return ops == iommu_fwspec_ops(fwspec) ? 0 : -EINVAL;
+ return iommu->ops == iommu_fwspec_ops(fwspec) ? 0 : -EINVAL;
if (!dev_iommu_get(dev))
return -ENOMEM;
@@ -2920,38 +2948,6 @@ int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids)
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
-/*
- * Per device IOMMU features.
- */
-int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
-{
- if (dev_has_iommu(dev)) {
- const struct iommu_ops *ops = dev_iommu_ops(dev);
-
- if (ops->dev_enable_feat)
- return ops->dev_enable_feat(dev, feat);
- }
-
- return -ENODEV;
-}
-EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
-
-/*
- * The device drivers should do the necessary cleanups before calling this.
- */
-int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
-{
- if (dev_has_iommu(dev)) {
- const struct iommu_ops *ops = dev_iommu_ops(dev);
-
- if (ops->dev_disable_feat)
- return ops->dev_disable_feat(dev, feat);
- }
-
- return -EBUSY;
-}
-EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
-
/**
* iommu_setup_default_domain - Set the default_domain for the group
* @group: Group to change
@@ -3454,7 +3450,8 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
!ops->blocked_domain->ops->set_dev_pasid)
return -EOPNOTSUPP;
- if (ops != domain->owner || pasid == IOMMU_NO_PASID)
+ if (!domain_iommu_ops_compatible(ops, domain) ||
+ pasid == IOMMU_NO_PASID)
return -EINVAL;
mutex_lock(&group->mutex);
@@ -3536,7 +3533,7 @@ int iommu_replace_device_pasid(struct iommu_domain *domain,
if (!domain->ops->set_dev_pasid)
return -EOPNOTSUPP;
- if (dev_iommu_ops(dev) != domain->owner ||
+ if (!domain_iommu_ops_compatible(dev_iommu_ops(dev), domain) ||
pasid == IOMMU_NO_PASID || !handle)
return -EINVAL;
diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
index 2111bad72c72..86244403b532 100644
--- a/drivers/iommu/iommufd/device.c
+++ b/drivers/iommu/iommufd/device.c
@@ -221,7 +221,6 @@ struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
refcount_inc(&idev->obj.users);
/* igroup refcount moves into iommufd_device */
idev->igroup = igroup;
- mutex_init(&idev->iopf_lock);
/*
* If the caller fails after this success it must call
@@ -425,6 +424,25 @@ static int iommufd_hwpt_pasid_compat(struct iommufd_hw_pagetable *hwpt,
return 0;
}
+static bool iommufd_hwpt_compatible_device(struct iommufd_hw_pagetable *hwpt,
+ struct iommufd_device *idev)
+{
+ struct pci_dev *pdev;
+
+ if (!hwpt->fault || !dev_is_pci(idev->dev))
+ return true;
+
+ /*
+ * Once we turn on PCI/PRI support for VF, the response failure code
+ * should not be forwarded to the hardware due to PRI being a shared
+ * resource between PF and VFs. There is no coordination for this
+ * shared capability. This waits for a vPRI reset to recover.
+ */
+ pdev = to_pci_dev(idev->dev);
+
+ return (!pdev->is_virtfn || !pci_pri_supported(pdev));
+}
+
static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
struct iommufd_device *idev,
ioasid_t pasid)
@@ -432,6 +450,9 @@ static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
struct iommufd_attach_handle *handle;
int rc;
+ if (!iommufd_hwpt_compatible_device(hwpt, idev))
+ return -EINVAL;
+
rc = iommufd_hwpt_pasid_compat(hwpt, idev, pasid);
if (rc)
return rc;
@@ -440,12 +461,6 @@ static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
if (!handle)
return -ENOMEM;
- if (hwpt->fault) {
- rc = iommufd_fault_iopf_enable(idev);
- if (rc)
- goto out_free_handle;
- }
-
handle->idev = idev;
if (pasid == IOMMU_NO_PASID)
rc = iommu_attach_group_handle(hwpt->domain, idev->igroup->group,
@@ -454,13 +469,10 @@ static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
rc = iommu_attach_device_pasid(hwpt->domain, idev->dev, pasid,
&handle->handle);
if (rc)
- goto out_disable_iopf;
+ goto out_free_handle;
return 0;
-out_disable_iopf:
- if (hwpt->fault)
- iommufd_fault_iopf_disable(idev);
out_free_handle:
kfree(handle);
return rc;
@@ -492,10 +504,7 @@ static void iommufd_hwpt_detach_device(struct iommufd_hw_pagetable *hwpt,
else
iommu_detach_device_pasid(hwpt->domain, idev->dev, pasid);
- if (hwpt->fault) {
- iommufd_auto_response_faults(hwpt, handle);
- iommufd_fault_iopf_disable(idev);
- }
+ iommufd_auto_response_faults(hwpt, handle);
kfree(handle);
}
@@ -507,6 +516,9 @@ static int iommufd_hwpt_replace_device(struct iommufd_device *idev,
struct iommufd_attach_handle *handle, *old_handle;
int rc;
+ if (!iommufd_hwpt_compatible_device(hwpt, idev))
+ return -EINVAL;
+
rc = iommufd_hwpt_pasid_compat(hwpt, idev, pasid);
if (rc)
return rc;
@@ -517,12 +529,6 @@ static int iommufd_hwpt_replace_device(struct iommufd_device *idev,
if (!handle)
return -ENOMEM;
- if (hwpt->fault && !old->fault) {
- rc = iommufd_fault_iopf_enable(idev);
- if (rc)
- goto out_free_handle;
- }
-
handle->idev = idev;
if (pasid == IOMMU_NO_PASID)
rc = iommu_replace_group_handle(idev->igroup->group,
@@ -531,20 +537,13 @@ static int iommufd_hwpt_replace_device(struct iommufd_device *idev,
rc = iommu_replace_device_pasid(hwpt->domain, idev->dev,
pasid, &handle->handle);
if (rc)
- goto out_disable_iopf;
+ goto out_free_handle;
- if (old->fault) {
- iommufd_auto_response_faults(hwpt, old_handle);
- if (!hwpt->fault)
- iommufd_fault_iopf_disable(idev);
- }
+ iommufd_auto_response_faults(hwpt, old_handle);
kfree(old_handle);
return 0;
-out_disable_iopf:
- if (hwpt->fault && !old->fault)
- iommufd_fault_iopf_disable(idev);
out_free_handle:
kfree(handle);
return rc;
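
The PRI-on-VF restriction that previously lived in iommufd_fault_iopf_enable() is now applied at attach/replace time through iommufd_hwpt_compatible_device(); stripped of the hwpt->fault gating, the device-side test reduces to the sketch below (the example_* name is invented).

#include <linux/pci.h>
#include <linux/pci-ats.h>

/* Fault-capable page tables must not be attached to a VF whose PRI is a
 * capability shared with the PF: a response failure code could poison PRI
 * for every function, and only a vPRI reset would recover it. */
static bool example_dev_allows_iopf(struct device *dev)
{
	struct pci_dev *pdev;

	if (!dev_is_pci(dev))
		return true;
	pdev = to_pci_dev(dev);
	return !pdev->is_virtfn || !pci_pri_supported(pdev);
}
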
diff --git a/drivers/iommu/iommufd/eventq.c b/drivers/iommu/iommufd/eventq.c
index f39cf0797347..e373b9eec7f5 100644
--- a/drivers/iommu/iommufd/eventq.c
+++ b/drivers/iommu/iommufd/eventq.c
@@ -9,8 +9,6 @@
#include <linux/iommufd.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/pci.h>
-#include <linux/pci-ats.h>
#include <linux/poll.h>
#include <uapi/linux/iommufd.h>
@@ -18,50 +16,6 @@
#include "iommufd_private.h"
/* IOMMUFD_OBJ_FAULT Functions */
-
-int iommufd_fault_iopf_enable(struct iommufd_device *idev)
-{
- struct device *dev = idev->dev;
- int ret;
-
- /*
- * Once we turn on PCI/PRI support for VF, the response failure code
- * should not be forwarded to the hardware due to PRI being a shared
- * resource between PF and VFs. There is no coordination for this
- * shared capability. This waits for a vPRI reset to recover.
- */
- if (dev_is_pci(dev)) {
- struct pci_dev *pdev = to_pci_dev(dev);
-
- if (pdev->is_virtfn && pci_pri_supported(pdev))
- return -EINVAL;
- }
-
- mutex_lock(&idev->iopf_lock);
- /* Device iopf has already been on. */
- if (++idev->iopf_enabled > 1) {
- mutex_unlock(&idev->iopf_lock);
- return 0;
- }
-
- ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_IOPF);
- if (ret)
- --idev->iopf_enabled;
- mutex_unlock(&idev->iopf_lock);
-
- return ret;
-}
-
-void iommufd_fault_iopf_disable(struct iommufd_device *idev)
-{
- mutex_lock(&idev->iopf_lock);
- if (!WARN_ON(idev->iopf_enabled == 0)) {
- if (--idev->iopf_enabled == 0)
- iommu_dev_disable_feature(idev->dev, IOMMU_DEV_FEAT_IOPF);
- }
- mutex_unlock(&idev->iopf_lock);
-}
-
void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
struct iommufd_attach_handle *handle)
{
@@ -70,7 +24,7 @@ void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
struct list_head free_list;
unsigned long index;
- if (!fault)
+ if (!fault || !handle)
return;
INIT_LIST_HEAD(&free_list);
diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index 80e8c76d25f2..9ccc83341f32 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -425,9 +425,6 @@ struct iommufd_device {
/* always the physical device */
struct device *dev;
bool enforce_cache_coherency;
- /* protect iopf_enabled counter */
- struct mutex iopf_lock;
- unsigned int iopf_enabled;
};
static inline struct iommufd_device *
@@ -506,9 +503,6 @@ iommufd_get_fault(struct iommufd_ucmd *ucmd, u32 id)
int iommufd_fault_alloc(struct iommufd_ucmd *ucmd);
void iommufd_fault_destroy(struct iommufd_object *obj);
int iommufd_fault_iopf_handler(struct iopf_group *group);
-
-int iommufd_fault_iopf_enable(struct iommufd_device *idev);
-void iommufd_fault_iopf_disable(struct iommufd_device *idev);
void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
struct iommufd_attach_handle *handle);
diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c
index 18d9a216eb30..6bd0abf9a641 100644
--- a/drivers/iommu/iommufd/selftest.c
+++ b/drivers/iommu/iommufd/selftest.c
@@ -58,6 +58,9 @@ enum {
MOCK_PFN_HUGE_IOVA = _MOCK_PFN_START << 2,
};
+static int mock_dev_enable_iopf(struct device *dev, struct iommu_domain *domain);
+static void mock_dev_disable_iopf(struct device *dev, struct iommu_domain *domain);
+
/*
* Syzkaller has trouble randomizing the correct iova to use since it is linked
 * to the map ioctl's output, and it has no idea about that. So, simplify things.
@@ -168,6 +171,8 @@ struct mock_dev {
int id;
u32 cache[MOCK_DEV_CACHE_NUM];
atomic_t pasid_1024_fake_error;
+ unsigned int iopf_refcount;
+ struct iommu_domain *domain;
};
static inline struct mock_dev *to_mock_dev(struct device *dev)
@@ -221,6 +226,13 @@ static int mock_domain_nop_attach(struct iommu_domain *domain,
up_write(&mdev->viommu_rwsem);
}
+ rc = mock_dev_enable_iopf(dev, domain);
+ if (rc)
+ return rc;
+
+ mock_dev_disable_iopf(dev, mdev->domain);
+ mdev->domain = domain;
+
return 0;
}
@@ -229,6 +241,7 @@ static int mock_domain_set_dev_pasid_nop(struct iommu_domain *domain,
struct iommu_domain *old)
{
struct mock_dev *mdev = to_mock_dev(dev);
+ int rc;
/*
* Per the first attach with pasid 1024, set the
@@ -256,6 +269,12 @@ static int mock_domain_set_dev_pasid_nop(struct iommu_domain *domain,
}
}
+ rc = mock_dev_enable_iopf(dev, domain);
+ if (rc)
+ return rc;
+
+ mock_dev_disable_iopf(dev, old);
+
return 0;
}
@@ -610,22 +629,42 @@ static void mock_domain_page_response(struct device *dev, struct iopf_fault *evt
{
}
-static int mock_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
+static int mock_dev_enable_iopf(struct device *dev, struct iommu_domain *domain)
{
- if (feat != IOMMU_DEV_FEAT_IOPF || !mock_iommu_iopf_queue)
+ struct mock_dev *mdev = to_mock_dev(dev);
+ int ret;
+
+ if (!domain || !domain->iopf_handler)
+ return 0;
+
+ if (!mock_iommu_iopf_queue)
return -ENODEV;
- return iopf_queue_add_device(mock_iommu_iopf_queue, dev);
+ if (mdev->iopf_refcount) {
+ mdev->iopf_refcount++;
+ return 0;
+ }
+
+ ret = iopf_queue_add_device(mock_iommu_iopf_queue, dev);
+ if (ret)
+ return ret;
+
+ mdev->iopf_refcount = 1;
+
+ return 0;
}
-static int mock_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
+static void mock_dev_disable_iopf(struct device *dev, struct iommu_domain *domain)
{
- if (feat != IOMMU_DEV_FEAT_IOPF || !mock_iommu_iopf_queue)
- return -ENODEV;
+ struct mock_dev *mdev = to_mock_dev(dev);
- iopf_queue_remove_device(mock_iommu_iopf_queue, dev);
+ if (!domain || !domain->iopf_handler)
+ return;
- return 0;
+ if (--mdev->iopf_refcount)
+ return;
+
+ iopf_queue_remove_device(mock_iommu_iopf_queue, dev);
}
static void mock_viommu_destroy(struct iommufd_viommu *viommu)
@@ -770,8 +809,6 @@ static const struct iommu_ops mock_ops = {
.device_group = generic_device_group,
.probe_device = mock_probe_device,
.page_response = mock_domain_page_response,
- .dev_enable_feat = mock_dev_enable_feat,
- .dev_disable_feat = mock_dev_disable_feat,
.user_pasid_table = true,
.viommu_alloc = mock_viommu_alloc,
.default_domain_ops =
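
The mock driver replaces the deleted dev_enable_feat/dev_disable_feat callbacks with attach-time reference counting keyed on whether the incoming domain carries an iopf_handler. Below is a distilled sketch of that pattern; the mydrv_* structure and queue are invented stand-ins, not the selftest's exact code.

#include <linux/iommu.h>

struct mydrv_dev {
	struct device dev;
	unsigned int iopf_refcount;
};

static struct iopf_queue *mydrv_iopf_queue;	/* assumed created at probe */

/* First fault-capable attach joins the IOPF queue, later ones only count. */
static int mydrv_iopf_get(struct mydrv_dev *mdev, struct iommu_domain *domain)
{
	int ret;

	if (!domain || !domain->iopf_handler)
		return 0;			/* domain does not fault */
	if (mdev->iopf_refcount++)
		return 0;			/* already enabled */

	ret = iopf_queue_add_device(mydrv_iopf_queue, &mdev->dev);
	if (ret)
		mdev->iopf_refcount = 0;
	return ret;
}

static void mydrv_iopf_put(struct mydrv_dev *mdev, struct iommu_domain *domain)
{
	if (!domain || !domain->iopf_handler)
		return;
	if (--mdev->iopf_refcount)
		return;
	iopf_queue_remove_device(mydrv_iopf_queue, &mdev->dev);
}
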
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index e424b279a8cd..90341b24a811 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -1090,7 +1090,8 @@ static int ipmmu_probe(struct platform_device *pdev)
if (mmu->features->has_cache_leaf_nodes && ipmmu_is_root(mmu))
return 0;
- ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL, dev_name(&pdev->dev));
+ ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL, "%s",
+ dev_name(&pdev->dev));
if (ret)
return ret;
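
The ipmmu-vmsa hunk fixes a classic format-string hazard: dev_name() originates from firmware (device tree node names) and must be passed as data, never as the format itself. For illustration:

#include <linux/device.h>
#include <linux/iommu.h>

static int example_register_sysfs(struct iommu_device *iommu,
				  struct device *dev)
{
	/* A '%' in the firmware-provided name would otherwise be parsed as a
	 * conversion specifier; "%s" keeps the name purely as an argument. */
	return iommu_device_sysfs_add(iommu, dev, NULL, "%s", dev_name(dev));
}
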
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index df98d0c65f54..cb95fecf6016 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -1550,6 +1550,31 @@ static const struct mtk_iommu_plat_data mt6795_data = {
.larbid_remap = {{0}, {1}, {2}, {3}, {4}}, /* Linear mapping. */
};
+static const unsigned int mt8192_larb_region_msk[MT8192_MULTI_REGION_NR_MAX][MTK_LARB_NR_MAX] = {
+ [0] = {~0, ~0}, /* Region0: larb0/1 */
+ [1] = {0, 0, 0, 0, ~0, ~0, 0, ~0}, /* Region1: larb4/5/7 */
+ [2] = {0, 0, ~0, 0, 0, 0, 0, 0, /* Region2: larb2/9/11/13/14/16/17/18/19/20 */
+ 0, ~0, 0, ~0, 0, ~(u32)(BIT(9) | BIT(10)), ~(u32)(BIT(4) | BIT(5)), 0,
+ ~0, ~0, ~0, ~0, ~0},
+ [3] = {0},
+ [4] = {[13] = BIT(9) | BIT(10)}, /* larb13 port9/10 */
+ [5] = {[14] = BIT(4) | BIT(5)}, /* larb14 port4/5 */
+};
+
+static const struct mtk_iommu_plat_data mt6893_data = {
+ .m4u_plat = M4U_MT8192,
+ .flags = HAS_BCLK | OUT_ORDER_WR_EN | HAS_SUB_COMM_2BITS |
+ WR_THROT_EN | IOVA_34_EN | SHARE_PGTABLE | MTK_IOMMU_TYPE_MM,
+ .inv_sel_reg = REG_MMU_INV_SEL_GEN2,
+ .banks_num = 1,
+ .banks_enable = {true},
+ .iova_region = mt8192_multi_dom,
+ .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom),
+ .iova_region_larb_msk = mt8192_larb_region_msk,
+ .larbid_remap = {{0}, {1}, {4, 5}, {7}, {2}, {9, 11, 19, 20},
+ {0, 14, 16}, {0, 13, 18, 17}},
+};
+
static const struct mtk_iommu_plat_data mt8167_data = {
.m4u_plat = M4U_MT8167,
.flags = RESET_AXI | HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM,
@@ -1673,17 +1698,6 @@ static const struct mtk_iommu_plat_data mt8188_data_vpp = {
27, 28 /* ccu0 */, MTK_INVALID_LARBID}, {4, 6}},
};
-static const unsigned int mt8192_larb_region_msk[MT8192_MULTI_REGION_NR_MAX][MTK_LARB_NR_MAX] = {
- [0] = {~0, ~0}, /* Region0: larb0/1 */
- [1] = {0, 0, 0, 0, ~0, ~0, 0, ~0}, /* Region1: larb4/5/7 */
- [2] = {0, 0, ~0, 0, 0, 0, 0, 0, /* Region2: larb2/9/11/13/14/16/17/18/19/20 */
- 0, ~0, 0, ~0, 0, ~(u32)(BIT(9) | BIT(10)), ~(u32)(BIT(4) | BIT(5)), 0,
- ~0, ~0, ~0, ~0, ~0},
- [3] = {0},
- [4] = {[13] = BIT(9) | BIT(10)}, /* larb13 port9/10 */
- [5] = {[14] = BIT(4) | BIT(5)}, /* larb14 port4/5 */
-};
-
static const struct mtk_iommu_plat_data mt8192_data = {
.m4u_plat = M4U_MT8192,
.flags = HAS_BCLK | HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN |
@@ -1777,6 +1791,7 @@ static const struct of_device_id mtk_iommu_of_ids[] = {
{ .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
{ .compatible = "mediatek,mt6779-m4u", .data = &mt6779_data},
{ .compatible = "mediatek,mt6795-m4u", .data = &mt6795_data},
+ { .compatible = "mediatek,mt6893-iommu-mm", .data = &mt6893_data},
{ .compatible = "mediatek,mt8167-m4u", .data = &mt8167_data},
{ .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
{ .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
diff --git a/drivers/iommu/riscv/Makefile b/drivers/iommu/riscv/Makefile
index f54c9ed17d41..b5929f9f23e6 100644
--- a/drivers/iommu/riscv/Makefile
+++ b/drivers/iommu/riscv/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_RISCV_IOMMU) += iommu.o iommu-platform.o
+obj-y += iommu.o iommu-platform.o
obj-$(CONFIG_RISCV_IOMMU_PCI) += iommu-pci.o
diff --git a/drivers/iommu/riscv/iommu.c b/drivers/iommu/riscv/iommu.c
index 8f049d4a0e2c..bb57092ca901 100644
--- a/drivers/iommu/riscv/iommu.c
+++ b/drivers/iommu/riscv/iommu.c
@@ -48,14 +48,13 @@ static DEFINE_IDA(riscv_iommu_pscids);
/* Device resource-managed allocations */
struct riscv_iommu_devres {
void *addr;
- int order;
};
static void riscv_iommu_devres_pages_release(struct device *dev, void *res)
{
struct riscv_iommu_devres *devres = res;
- iommu_free_pages(devres->addr, devres->order);
+ iommu_free_pages(devres->addr);
}
static int riscv_iommu_devres_pages_match(struct device *dev, void *res, void *p)
@@ -66,13 +65,14 @@ static int riscv_iommu_devres_pages_match(struct device *dev, void *res, void *p
return devres->addr == target->addr;
}
-static void *riscv_iommu_get_pages(struct riscv_iommu_device *iommu, int order)
+static void *riscv_iommu_get_pages(struct riscv_iommu_device *iommu,
+ unsigned int size)
{
struct riscv_iommu_devres *devres;
void *addr;
- addr = iommu_alloc_pages_node(dev_to_node(iommu->dev),
- GFP_KERNEL_ACCOUNT, order);
+ addr = iommu_alloc_pages_node_sz(dev_to_node(iommu->dev),
+ GFP_KERNEL_ACCOUNT, size);
if (unlikely(!addr))
return NULL;
@@ -80,12 +80,11 @@ static void *riscv_iommu_get_pages(struct riscv_iommu_device *iommu, int order)
sizeof(struct riscv_iommu_devres), GFP_KERNEL);
if (unlikely(!devres)) {
- iommu_free_pages(addr, order);
+ iommu_free_pages(addr);
return NULL;
}
devres->addr = addr;
- devres->order = order;
devres_add(iommu->dev, devres);
@@ -163,9 +162,9 @@ static int riscv_iommu_queue_alloc(struct riscv_iommu_device *iommu,
} else {
do {
const size_t queue_size = entry_size << (logsz + 1);
- const int order = get_order(queue_size);
- queue->base = riscv_iommu_get_pages(iommu, order);
+ queue->base = riscv_iommu_get_pages(
+ iommu, max(queue_size, SZ_4K));
queue->phys = __pa(queue->base);
} while (!queue->base && logsz-- > 0);
}
@@ -620,7 +619,7 @@ static struct riscv_iommu_dc *riscv_iommu_get_dc(struct riscv_iommu_device *iomm
break;
}
- ptr = riscv_iommu_get_pages(iommu, 0);
+ ptr = riscv_iommu_get_pages(iommu, SZ_4K);
if (!ptr)
return NULL;
@@ -700,7 +699,7 @@ static int riscv_iommu_iodir_alloc(struct riscv_iommu_device *iommu)
}
if (!iommu->ddt_root) {
- iommu->ddt_root = riscv_iommu_get_pages(iommu, 0);
+ iommu->ddt_root = riscv_iommu_get_pages(iommu, SZ_4K);
iommu->ddt_phys = __pa(iommu->ddt_root);
}
@@ -1087,7 +1086,8 @@ static void riscv_iommu_iotlb_sync(struct iommu_domain *iommu_domain,
#define _io_pte_entry(pn, prot) ((_PAGE_PFN_MASK & ((pn) << _PAGE_PFN_SHIFT)) | (prot))
static void riscv_iommu_pte_free(struct riscv_iommu_domain *domain,
- unsigned long pte, struct list_head *freelist)
+ unsigned long pte,
+ struct iommu_pages_list *freelist)
{
unsigned long *ptr;
int i;
@@ -1105,9 +1105,9 @@ static void riscv_iommu_pte_free(struct riscv_iommu_domain *domain,
}
if (freelist)
- list_add_tail(&virt_to_page(ptr)->lru, freelist);
+ iommu_pages_list_add(freelist, ptr);
else
- iommu_free_page(ptr);
+ iommu_free_pages(ptr);
}
static unsigned long *riscv_iommu_pte_alloc(struct riscv_iommu_domain *domain,
@@ -1144,13 +1144,14 @@ pte_retry:
* page table. This might race with other mappings, retry.
*/
if (_io_pte_none(pte)) {
- addr = iommu_alloc_page_node(domain->numa_node, gfp);
+ addr = iommu_alloc_pages_node_sz(domain->numa_node, gfp,
+ SZ_4K);
if (!addr)
return NULL;
old = pte;
pte = _io_pte_entry(virt_to_pfn(addr), _PAGE_TABLE);
if (cmpxchg_relaxed(ptr, old, pte) != old) {
- iommu_free_page(addr);
+ iommu_free_pages(addr);
goto pte_retry;
}
}
@@ -1194,7 +1195,7 @@ static int riscv_iommu_map_pages(struct iommu_domain *iommu_domain,
unsigned long *ptr;
unsigned long pte, old, pte_prot;
int rc = 0;
- LIST_HEAD(freelist);
+ struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist);
if (!(prot & IOMMU_WRITE))
pte_prot = _PAGE_BASE | _PAGE_READ;
@@ -1225,7 +1226,7 @@ static int riscv_iommu_map_pages(struct iommu_domain *iommu_domain,
*mapped = size;
- if (!list_empty(&freelist)) {
+ if (!iommu_pages_list_empty(&freelist)) {
/*
* In 1.0 spec version, the smallest scope we can use to
* invalidate all levels of page table (i.e. leaf and non-leaf)
@@ -1385,8 +1386,8 @@ static struct iommu_domain *riscv_iommu_alloc_paging_domain(struct device *dev)
domain->numa_node = dev_to_node(iommu->dev);
domain->amo_enabled = !!(iommu->caps & RISCV_IOMMU_CAPABILITIES_AMO_HWAD);
domain->pgd_mode = pgd_mode;
- domain->pgd_root = iommu_alloc_page_node(domain->numa_node,
- GFP_KERNEL_ACCOUNT);
+ domain->pgd_root = iommu_alloc_pages_node_sz(domain->numa_node,
+ GFP_KERNEL_ACCOUNT, SZ_4K);
if (!domain->pgd_root) {
kfree(domain);
return ERR_PTR(-ENOMEM);
@@ -1395,7 +1396,7 @@ static struct iommu_domain *riscv_iommu_alloc_paging_domain(struct device *dev)
domain->pscid = ida_alloc_range(&riscv_iommu_pscids, 1,
RISCV_IOMMU_MAX_PSCID, GFP_KERNEL);
if (domain->pscid < 0) {
- iommu_free_page(domain->pgd_root);
+ iommu_free_pages(domain->pgd_root);
kfree(domain);
return ERR_PTR(-ENOMEM);
}
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index af4cc91b2bbf..22f74ba33a0e 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -730,14 +730,15 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
if (rk_dte_is_pt_valid(dte))
goto done;
- page_table = iommu_alloc_page(GFP_ATOMIC | rk_ops->gfp_flags);
+ page_table = iommu_alloc_pages_sz(GFP_ATOMIC | rk_ops->gfp_flags,
+ SPAGE_SIZE);
if (!page_table)
return ERR_PTR(-ENOMEM);
pt_dma = dma_map_single(rk_domain->dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(rk_domain->dma_dev, pt_dma)) {
dev_err(rk_domain->dma_dev, "DMA mapping error while allocating page table\n");
- iommu_free_page(page_table);
+ iommu_free_pages(page_table);
return ERR_PTR(-ENOMEM);
}
@@ -1062,7 +1063,8 @@ static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
* Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
* Allocate one 4 KiB page for each table.
*/
- rk_domain->dt = iommu_alloc_page(GFP_KERNEL | rk_ops->gfp_flags);
+ rk_domain->dt = iommu_alloc_pages_sz(GFP_KERNEL | rk_ops->gfp_flags,
+ SPAGE_SIZE);
if (!rk_domain->dt)
goto err_free_domain;
@@ -1086,7 +1088,7 @@ static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
return &rk_domain->domain;
err_free_dt:
- iommu_free_page(rk_domain->dt);
+ iommu_free_pages(rk_domain->dt);
err_free_domain:
kfree(rk_domain);
@@ -1107,13 +1109,13 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)
u32 *page_table = phys_to_virt(pt_phys);
dma_unmap_single(rk_domain->dma_dev, pt_phys,
SPAGE_SIZE, DMA_TO_DEVICE);
- iommu_free_page(page_table);
+ iommu_free_pages(page_table);
}
}
dma_unmap_single(rk_domain->dma_dev, rk_domain->dt_dma,
SPAGE_SIZE, DMA_TO_DEVICE);
- iommu_free_page(rk_domain->dt);
+ iommu_free_pages(rk_domain->dt);
kfree(rk_domain);
}
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index e1c76e0f9c2b..433b59f43530 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -31,10 +31,21 @@ struct s390_domain {
unsigned long *dma_table;
spinlock_t list_lock;
struct rcu_head rcu;
+ u8 origin_type;
};
static struct iommu_domain blocking_domain;
+static inline unsigned int calc_rfx(dma_addr_t ptr)
+{
+ return ((unsigned long)ptr >> ZPCI_RF_SHIFT) & ZPCI_INDEX_MASK;
+}
+
+static inline unsigned int calc_rsx(dma_addr_t ptr)
+{
+ return ((unsigned long)ptr >> ZPCI_RS_SHIFT) & ZPCI_INDEX_MASK;
+}
+
static inline unsigned int calc_rtx(dma_addr_t ptr)
{
return ((unsigned long)ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK;
@@ -56,6 +67,20 @@ static inline void set_pt_pfaa(unsigned long *entry, phys_addr_t pfaa)
*entry |= (pfaa & ZPCI_PTE_ADDR_MASK);
}
+static inline void set_rf_rso(unsigned long *entry, phys_addr_t rso)
+{
+ *entry &= ZPCI_RTE_FLAG_MASK;
+ *entry |= (rso & ZPCI_RTE_ADDR_MASK);
+ *entry |= ZPCI_TABLE_TYPE_RFX;
+}
+
+static inline void set_rs_rto(unsigned long *entry, phys_addr_t rto)
+{
+ *entry &= ZPCI_RTE_FLAG_MASK;
+ *entry |= (rto & ZPCI_RTE_ADDR_MASK);
+ *entry |= ZPCI_TABLE_TYPE_RSX;
+}
+
static inline void set_rt_sto(unsigned long *entry, phys_addr_t sto)
{
*entry &= ZPCI_RTE_FLAG_MASK;
@@ -70,6 +95,22 @@ static inline void set_st_pto(unsigned long *entry, phys_addr_t pto)
*entry |= ZPCI_TABLE_TYPE_SX;
}
+static inline void validate_rf_entry(unsigned long *entry)
+{
+ *entry &= ~ZPCI_TABLE_VALID_MASK;
+ *entry &= ~ZPCI_TABLE_OFFSET_MASK;
+ *entry |= ZPCI_TABLE_VALID;
+ *entry |= ZPCI_TABLE_LEN_RFX;
+}
+
+static inline void validate_rs_entry(unsigned long *entry)
+{
+ *entry &= ~ZPCI_TABLE_VALID_MASK;
+ *entry &= ~ZPCI_TABLE_OFFSET_MASK;
+ *entry |= ZPCI_TABLE_VALID;
+ *entry |= ZPCI_TABLE_LEN_RSX;
+}
+
static inline void validate_rt_entry(unsigned long *entry)
{
*entry &= ~ZPCI_TABLE_VALID_MASK;
@@ -120,6 +161,22 @@ static inline int pt_entry_isvalid(unsigned long entry)
return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID;
}
+static inline unsigned long *get_rf_rso(unsigned long entry)
+{
+ if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RFX)
+ return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK);
+ else
+ return NULL;
+}
+
+static inline unsigned long *get_rs_rto(unsigned long entry)
+{
+ if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RSX)
+ return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK);
+ else
+ return NULL;
+}
+
static inline unsigned long *get_rt_sto(unsigned long entry)
{
if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
@@ -191,18 +248,59 @@ static void dma_free_seg_table(unsigned long entry)
dma_free_cpu_table(sto);
}
-static void dma_cleanup_tables(unsigned long *table)
+static void dma_free_rt_table(unsigned long entry)
{
+ unsigned long *rto = get_rs_rto(entry);
int rtx;
- if (!table)
+ for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
+ if (reg_entry_isvalid(rto[rtx]))
+ dma_free_seg_table(rto[rtx]);
+
+ dma_free_cpu_table(rto);
+}
+
+static void dma_free_rs_table(unsigned long entry)
+{
+ unsigned long *rso = get_rf_rso(entry);
+ int rsx;
+
+ for (rsx = 0; rsx < ZPCI_TABLE_ENTRIES; rsx++)
+ if (reg_entry_isvalid(rso[rsx]))
+ dma_free_rt_table(rso[rsx]);
+
+ dma_free_cpu_table(rso);
+}
+
+static void dma_cleanup_tables(struct s390_domain *domain)
+{
+ int rtx, rsx, rfx;
+
+ if (!domain->dma_table)
return;
- for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
- if (reg_entry_isvalid(table[rtx]))
- dma_free_seg_table(table[rtx]);
+ switch (domain->origin_type) {
+ case ZPCI_TABLE_TYPE_RFX:
+ for (rfx = 0; rfx < ZPCI_TABLE_ENTRIES; rfx++)
+ if (reg_entry_isvalid(domain->dma_table[rfx]))
+ dma_free_rs_table(domain->dma_table[rfx]);
+ break;
+ case ZPCI_TABLE_TYPE_RSX:
+ for (rsx = 0; rsx < ZPCI_TABLE_ENTRIES; rsx++)
+ if (reg_entry_isvalid(domain->dma_table[rsx]))
+ dma_free_rt_table(domain->dma_table[rsx]);
+ break;
+ case ZPCI_TABLE_TYPE_RTX:
+ for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
+ if (reg_entry_isvalid(domain->dma_table[rtx]))
+ dma_free_seg_table(domain->dma_table[rtx]);
+ break;
+ default:
+ WARN_ONCE(1, "Invalid IOMMU table (%x)\n", domain->origin_type);
+ return;
+ }
- dma_free_cpu_table(table);
+ dma_free_cpu_table(domain->dma_table);
}
static unsigned long *dma_alloc_page_table(gfp_t gfp)
@@ -218,6 +316,70 @@ static unsigned long *dma_alloc_page_table(gfp_t gfp)
return table;
}
+static unsigned long *dma_walk_rs_table(unsigned long *rso,
+ dma_addr_t dma_addr, gfp_t gfp)
+{
+ unsigned int rsx = calc_rsx(dma_addr);
+ unsigned long old_rse, rse;
+ unsigned long *rsep, *rto;
+
+ rsep = &rso[rsx];
+ rse = READ_ONCE(*rsep);
+ if (reg_entry_isvalid(rse)) {
+ rto = get_rs_rto(rse);
+ } else {
+ rto = dma_alloc_cpu_table(gfp);
+ if (!rto)
+ return NULL;
+
+ set_rs_rto(&rse, virt_to_phys(rto));
+ validate_rs_entry(&rse);
+ entry_clr_protected(&rse);
+
+ old_rse = cmpxchg(rsep, ZPCI_TABLE_INVALID, rse);
+ if (old_rse != ZPCI_TABLE_INVALID) {
+			/* Someone else was faster, use theirs */
+ dma_free_cpu_table(rto);
+ rto = get_rs_rto(old_rse);
+ }
+ }
+ return rto;
+}
+
+static unsigned long *dma_walk_rf_table(unsigned long *rfo,
+ dma_addr_t dma_addr, gfp_t gfp)
+{
+ unsigned int rfx = calc_rfx(dma_addr);
+ unsigned long old_rfe, rfe;
+ unsigned long *rfep, *rso;
+
+ rfep = &rfo[rfx];
+ rfe = READ_ONCE(*rfep);
+ if (reg_entry_isvalid(rfe)) {
+ rso = get_rf_rso(rfe);
+ } else {
+ rso = dma_alloc_cpu_table(gfp);
+ if (!rso)
+ return NULL;
+
+ set_rf_rso(&rfe, virt_to_phys(rso));
+ validate_rf_entry(&rfe);
+ entry_clr_protected(&rfe);
+
+ old_rfe = cmpxchg(rfep, ZPCI_TABLE_INVALID, rfe);
+ if (old_rfe != ZPCI_TABLE_INVALID) {
+			/* Someone else was faster, use theirs */
+ dma_free_cpu_table(rso);
+ rso = get_rf_rso(old_rfe);
+ }
+ }
+
+ if (!rso)
+ return NULL;
+
+ return dma_walk_rs_table(rso, dma_addr, gfp);
+}
+
static unsigned long *dma_get_seg_table_origin(unsigned long *rtep, gfp_t gfp)
{
unsigned long old_rte, rte;
@@ -271,11 +433,31 @@ static unsigned long *dma_get_page_table_origin(unsigned long *step, gfp_t gfp)
return pto;
}
-static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr, gfp_t gfp)
+static unsigned long *dma_walk_region_tables(struct s390_domain *domain,
+ dma_addr_t dma_addr, gfp_t gfp)
+{
+ switch (domain->origin_type) {
+ case ZPCI_TABLE_TYPE_RFX:
+ return dma_walk_rf_table(domain->dma_table, dma_addr, gfp);
+ case ZPCI_TABLE_TYPE_RSX:
+ return dma_walk_rs_table(domain->dma_table, dma_addr, gfp);
+ case ZPCI_TABLE_TYPE_RTX:
+ return domain->dma_table;
+ default:
+ return NULL;
+ }
+}
+
+static unsigned long *dma_walk_cpu_trans(struct s390_domain *domain,
+ dma_addr_t dma_addr, gfp_t gfp)
{
- unsigned long *sto, *pto;
+ unsigned long *rto, *sto, *pto;
unsigned int rtx, sx, px;
+ rto = dma_walk_region_tables(domain, dma_addr, gfp);
+ if (!rto)
+ return NULL;
+
rtx = calc_rtx(dma_addr);
sto = dma_get_seg_table_origin(&rto[rtx], gfp);
if (!sto)
@@ -329,9 +511,25 @@ static bool s390_iommu_capable(struct device *dev, enum iommu_cap cap)
}
}
+static inline u64 max_tbl_size(struct s390_domain *domain)
+{
+ switch (domain->origin_type) {
+ case ZPCI_TABLE_TYPE_RTX:
+ return ZPCI_TABLE_SIZE_RT - 1;
+ case ZPCI_TABLE_TYPE_RSX:
+ return ZPCI_TABLE_SIZE_RS - 1;
+ case ZPCI_TABLE_TYPE_RFX:
+ return U64_MAX;
+ default:
+ return 0;
+ }
+}
+
static struct iommu_domain *s390_domain_alloc_paging(struct device *dev)
{
+ struct zpci_dev *zdev = to_zpci_dev(dev);
struct s390_domain *s390_domain;
+ u64 aperture_size;
s390_domain = kzalloc(sizeof(*s390_domain), GFP_KERNEL);
if (!s390_domain)
@@ -342,9 +540,26 @@ static struct iommu_domain *s390_domain_alloc_paging(struct device *dev)
kfree(s390_domain);
return NULL;
}
+
+ aperture_size = min(s390_iommu_aperture,
+ zdev->end_dma - zdev->start_dma + 1);
+ if (aperture_size <= (ZPCI_TABLE_SIZE_RT - zdev->start_dma)) {
+ s390_domain->origin_type = ZPCI_TABLE_TYPE_RTX;
+ } else if (aperture_size <= (ZPCI_TABLE_SIZE_RS - zdev->start_dma) &&
+ (zdev->dtsm & ZPCI_IOTA_DT_RS)) {
+ s390_domain->origin_type = ZPCI_TABLE_TYPE_RSX;
+ } else if (zdev->dtsm & ZPCI_IOTA_DT_RF) {
+ s390_domain->origin_type = ZPCI_TABLE_TYPE_RFX;
+ } else {
+ /* Assume RTX available */
+ s390_domain->origin_type = ZPCI_TABLE_TYPE_RTX;
+ aperture_size = ZPCI_TABLE_SIZE_RT - zdev->start_dma;
+ }
+ zdev->end_dma = zdev->start_dma + aperture_size - 1;
+
s390_domain->domain.geometry.force_aperture = true;
s390_domain->domain.geometry.aperture_start = 0;
- s390_domain->domain.geometry.aperture_end = ZPCI_TABLE_SIZE_RT - 1;
+ s390_domain->domain.geometry.aperture_end = max_tbl_size(s390_domain);
spin_lock_init(&s390_domain->list_lock);
INIT_LIST_HEAD_RCU(&s390_domain->devices);
@@ -356,7 +571,7 @@ static void s390_iommu_rcu_free_domain(struct rcu_head *head)
{
struct s390_domain *s390_domain = container_of(head, struct s390_domain, rcu);
- dma_cleanup_tables(s390_domain->dma_table);
+ dma_cleanup_tables(s390_domain);
kfree(s390_domain);
}
@@ -381,6 +596,21 @@ static void zdev_s390_domain_update(struct zpci_dev *zdev,
spin_unlock_irqrestore(&zdev->dom_lock, flags);
}
+static u64 get_iota_region_flag(struct s390_domain *domain)
+{
+ switch (domain->origin_type) {
+ case ZPCI_TABLE_TYPE_RTX:
+ return ZPCI_IOTA_RTTO_FLAG;
+ case ZPCI_TABLE_TYPE_RSX:
+ return ZPCI_IOTA_RSTO_FLAG;
+ case ZPCI_TABLE_TYPE_RFX:
+ return ZPCI_IOTA_RFTO_FLAG;
+ default:
+ WARN_ONCE(1, "Invalid IOMMU table (%x)\n", domain->origin_type);
+ return 0;
+ }
+}
+
static int s390_iommu_domain_reg_ioat(struct zpci_dev *zdev,
struct iommu_domain *domain, u8 *status)
{
@@ -399,7 +629,7 @@ static int s390_iommu_domain_reg_ioat(struct zpci_dev *zdev,
default:
s390_domain = to_s390_domain(domain);
iota = virt_to_phys(s390_domain->dma_table) |
- ZPCI_IOTA_RTTO_FLAG;
+ get_iota_region_flag(s390_domain);
rc = zpci_register_ioat(zdev, 0, zdev->start_dma,
zdev->end_dma, iota, status);
}
@@ -482,6 +712,8 @@ static void s390_iommu_get_resv_regions(struct device *dev,
{
struct zpci_dev *zdev = to_zpci_dev(dev);
struct iommu_resv_region *region;
+ u64 max_size, end_resv;
+ unsigned long flags;
if (zdev->start_dma) {
region = iommu_alloc_resv_region(0, zdev->start_dma, 0,
@@ -491,10 +723,21 @@ static void s390_iommu_get_resv_regions(struct device *dev,
list_add_tail(&region->list, list);
}
- if (zdev->end_dma < ZPCI_TABLE_SIZE_RT - 1) {
- region = iommu_alloc_resv_region(zdev->end_dma + 1,
- ZPCI_TABLE_SIZE_RT - zdev->end_dma - 1,
- 0, IOMMU_RESV_RESERVED, GFP_KERNEL);
+ spin_lock_irqsave(&zdev->dom_lock, flags);
+ if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED ||
+ zdev->s390_domain->type == IOMMU_DOMAIN_IDENTITY) {
+ spin_unlock_irqrestore(&zdev->dom_lock, flags);
+ return;
+ }
+
+ max_size = max_tbl_size(to_s390_domain(zdev->s390_domain));
+ spin_unlock_irqrestore(&zdev->dom_lock, flags);
+
+ if (zdev->end_dma < max_size) {
+ end_resv = max_size - zdev->end_dma;
+ region = iommu_alloc_resv_region(zdev->end_dma + 1, end_resv,
+ 0, IOMMU_RESV_RESERVED,
+ GFP_KERNEL);
if (!region)
return;
list_add_tail(&region->list, list);
@@ -510,13 +753,9 @@ static struct iommu_device *s390_iommu_probe_device(struct device *dev)
zdev = to_zpci_dev(dev);
- if (zdev->start_dma > zdev->end_dma ||
- zdev->start_dma > ZPCI_TABLE_SIZE_RT - 1)
+ if (zdev->start_dma > zdev->end_dma)
return ERR_PTR(-EINVAL);
- if (zdev->end_dma > ZPCI_TABLE_SIZE_RT - 1)
- zdev->end_dma = ZPCI_TABLE_SIZE_RT - 1;
-
if (zdev->tlb_refresh)
dev->iommu->shadow_on_flush = 1;
@@ -606,8 +845,7 @@ static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
int rc;
for (i = 0; i < nr_pages; i++) {
- entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr,
- gfp);
+ entry = dma_walk_cpu_trans(s390_domain, dma_addr, gfp);
if (unlikely(!entry)) {
rc = -ENOMEM;
goto undo_cpu_trans;
@@ -622,8 +860,7 @@ static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
undo_cpu_trans:
while (i-- > 0) {
dma_addr -= PAGE_SIZE;
- entry = dma_walk_cpu_trans(s390_domain->dma_table,
- dma_addr, gfp);
+ entry = dma_walk_cpu_trans(s390_domain, dma_addr, gfp);
if (!entry)
break;
dma_update_cpu_trans(entry, 0, ZPCI_PTE_INVALID);
@@ -640,8 +877,7 @@ static int s390_iommu_invalidate_trans(struct s390_domain *s390_domain,
int rc = 0;
for (i = 0; i < nr_pages; i++) {
- entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr,
- GFP_ATOMIC);
+ entry = dma_walk_cpu_trans(s390_domain, dma_addr, GFP_ATOMIC);
if (unlikely(!entry)) {
rc = -EINVAL;
break;
@@ -685,6 +921,51 @@ static int s390_iommu_map_pages(struct iommu_domain *domain,
return rc;
}
+static unsigned long *get_rso_from_iova(struct s390_domain *domain,
+ dma_addr_t iova)
+{
+ unsigned long *rfo;
+ unsigned long rfe;
+ unsigned int rfx;
+
+ switch (domain->origin_type) {
+ case ZPCI_TABLE_TYPE_RFX:
+ rfo = domain->dma_table;
+ rfx = calc_rfx(iova);
+ rfe = READ_ONCE(rfo[rfx]);
+ if (!reg_entry_isvalid(rfe))
+ return NULL;
+ return get_rf_rso(rfe);
+ case ZPCI_TABLE_TYPE_RSX:
+ return domain->dma_table;
+ default:
+ return NULL;
+ }
+}
+
+static unsigned long *get_rto_from_iova(struct s390_domain *domain,
+ dma_addr_t iova)
+{
+ unsigned long *rso;
+ unsigned long rse;
+ unsigned int rsx;
+
+ switch (domain->origin_type) {
+ case ZPCI_TABLE_TYPE_RFX:
+ case ZPCI_TABLE_TYPE_RSX:
+ rso = get_rso_from_iova(domain, iova);
+ rsx = calc_rsx(iova);
+ rse = READ_ONCE(rso[rsx]);
+ if (!reg_entry_isvalid(rse))
+ return NULL;
+ return get_rs_rto(rse);
+ case ZPCI_TABLE_TYPE_RTX:
+ return domain->dma_table;
+ default:
+ return NULL;
+ }
+}
+
static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
@@ -698,10 +979,13 @@ static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain,
iova > domain->geometry.aperture_end)
return 0;
+ rto = get_rto_from_iova(s390_domain, iova);
+ if (!rto)
+ return 0;
+
rtx = calc_rtx(iova);
sx = calc_sx(iova);
px = calc_px(iova);
- rto = s390_domain->dma_table;
rte = READ_ONCE(rto[rtx]);
if (reg_entry_isvalid(rte)) {
@@ -756,7 +1040,6 @@ struct zpci_iommu_ctrs *zpci_get_iommu_ctrs(struct zpci_dev *zdev)
int zpci_init_iommu(struct zpci_dev *zdev)
{
- u64 aperture_size;
int rc = 0;
rc = iommu_device_sysfs_add(&zdev->iommu_dev, NULL, NULL,
@@ -774,12 +1057,6 @@ int zpci_init_iommu(struct zpci_dev *zdev)
if (rc)
goto out_sysfs;
- zdev->start_dma = PAGE_ALIGN(zdev->start_dma);
- aperture_size = min3(s390_iommu_aperture,
- ZPCI_TABLE_SIZE_RT - zdev->start_dma,
- zdev->end_dma - zdev->start_dma + 1);
- zdev->end_dma = zdev->start_dma + aperture_size - 1;
-
return 0;
out_sysfs:
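
Reading aid for the s390 changes: with the optional region-first (RFX) and region-second (RSX) levels, every lookup first normalises down to a region-third table and then reuses the existing RTX/SX/PX walk. A hedged sketch of that dispatch, written as if it lived next to the helpers in s390-iommu.c (it mirrors get_rto_from_iova() rather than replacing it):

/* Walk from whichever table type the domain was created with down to the
 * region-third table that covers @iova; NULL means that branch is not yet
 * populated. Locking and RCU considerations are omitted. */
static unsigned long *example_find_rto(struct s390_domain *domain,
				       dma_addr_t iova)
{
	unsigned long *table = domain->dma_table;
	unsigned long entry;

	if (domain->origin_type == ZPCI_TABLE_TYPE_RFX) {
		entry = READ_ONCE(table[calc_rfx(iova)]);
		if (!reg_entry_isvalid(entry))
			return NULL;
		table = get_rf_rso(entry);	/* region-first -> second */
	}
	if (domain->origin_type != ZPCI_TABLE_TYPE_RTX) {
		if (!table)
			return NULL;
		entry = READ_ONCE(table[calc_rsx(iova)]);
		if (!reg_entry_isvalid(entry))
			return NULL;
		table = get_rs_rto(entry);	/* region-second -> third */
	}
	return table;
}
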
diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
index 8d8f11854676..76c9620af4bb 100644
--- a/drivers/iommu/sun50i-iommu.c
+++ b/drivers/iommu/sun50i-iommu.c
@@ -690,8 +690,8 @@ sun50i_iommu_domain_alloc_paging(struct device *dev)
if (!sun50i_domain)
return NULL;
- sun50i_domain->dt = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32,
- get_order(DT_SIZE));
+ sun50i_domain->dt =
+ iommu_alloc_pages_sz(GFP_KERNEL | GFP_DMA32, DT_SIZE);
if (!sun50i_domain->dt)
goto err_free_domain;
@@ -713,7 +713,7 @@ static void sun50i_iommu_domain_free(struct iommu_domain *domain)
{
struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
- iommu_free_pages(sun50i_domain->dt, get_order(DT_SIZE));
+ iommu_free_pages(sun50i_domain->dt);
sun50i_domain->dt = NULL;
kfree(sun50i_domain);
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 69d353e1df84..61897d50162d 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -51,14 +51,17 @@ struct tegra_smmu {
struct iommu_device iommu; /* IOMMU Core code handle */
};
+struct tegra_pd;
+struct tegra_pt;
+
struct tegra_smmu_as {
struct iommu_domain domain;
struct tegra_smmu *smmu;
unsigned int use_count;
spinlock_t lock;
u32 *count;
- struct page **pts;
- struct page *pd;
+ struct tegra_pt **pts;
+ struct tegra_pd *pd;
dma_addr_t pd_dma;
unsigned id;
u32 attr;
@@ -155,6 +158,14 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
#define SMMU_PDE_ATTR (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
SMMU_PDE_NONSECURE)
+struct tegra_pd {
+ u32 val[SMMU_NUM_PDE];
+};
+
+struct tegra_pt {
+ u32 val[SMMU_NUM_PTE];
+};
+
static unsigned int iova_pd_index(unsigned long iova)
{
return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
@@ -284,7 +295,7 @@ static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev)
as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;
- as->pd = __iommu_alloc_pages(GFP_KERNEL | __GFP_DMA, 0);
+ as->pd = iommu_alloc_pages_sz(GFP_KERNEL | __GFP_DMA, SMMU_SIZE_PD);
if (!as->pd) {
kfree(as);
return NULL;
@@ -292,7 +303,7 @@ static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev)
as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
if (!as->count) {
- __iommu_free_pages(as->pd, 0);
+ iommu_free_pages(as->pd);
kfree(as);
return NULL;
}
@@ -300,7 +311,7 @@ static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev)
as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
if (!as->pts) {
kfree(as->count);
- __iommu_free_pages(as->pd, 0);
+ iommu_free_pages(as->pd);
kfree(as);
return NULL;
}
@@ -417,8 +428,8 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
goto unlock;
}
- as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
- DMA_TO_DEVICE);
+ as->pd_dma =
+ dma_map_single(smmu->dev, as->pd, SMMU_SIZE_PD, DMA_TO_DEVICE);
if (dma_mapping_error(smmu->dev, as->pd_dma)) {
err = -ENOMEM;
goto unlock;
@@ -450,7 +461,7 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
return 0;
err_unmap:
- dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
+ dma_unmap_single(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
unlock:
mutex_unlock(&smmu->lock);
@@ -469,7 +480,7 @@ static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
tegra_smmu_free_asid(smmu, as->id);
- dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
+ dma_unmap_single(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
as->smmu = NULL;
@@ -548,11 +559,11 @@ static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
{
unsigned int pd_index = iova_pd_index(iova);
struct tegra_smmu *smmu = as->smmu;
- u32 *pd = page_address(as->pd);
+ u32 *pd = &as->pd->val[pd_index];
unsigned long offset = pd_index * sizeof(*pd);
/* Set the page directory entry first */
- pd[pd_index] = value;
+ *pd = value;
/* Then flush the page directory entry from caches */
dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
@@ -564,11 +575,9 @@ static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
smmu_flush(smmu);
}
-static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
+static u32 *tegra_smmu_pte_offset(struct tegra_pt *pt, unsigned long iova)
{
- u32 *pt = page_address(pt_page);
-
- return pt + iova_pt_index(iova);
+ return &pt->val[iova_pt_index(iova)];
}
static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
@@ -576,21 +585,19 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
{
unsigned int pd_index = iova_pd_index(iova);
struct tegra_smmu *smmu = as->smmu;
- struct page *pt_page;
- u32 *pd;
+ struct tegra_pt *pt;
- pt_page = as->pts[pd_index];
- if (!pt_page)
+ pt = as->pts[pd_index];
+ if (!pt)
return NULL;
- pd = page_address(as->pd);
- *dmap = smmu_pde_to_dma(smmu, pd[pd_index]);
+ *dmap = smmu_pde_to_dma(smmu, as->pd->val[pd_index]);
- return tegra_smmu_pte_offset(pt_page, iova);
+ return tegra_smmu_pte_offset(pt, iova);
}
static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
- dma_addr_t *dmap, struct page *page)
+ dma_addr_t *dmap, struct tegra_pt *pt)
{
unsigned int pde = iova_pd_index(iova);
struct tegra_smmu *smmu = as->smmu;
@@ -598,30 +605,28 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
if (!as->pts[pde]) {
dma_addr_t dma;
- dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
- DMA_TO_DEVICE);
+ dma = dma_map_single(smmu->dev, pt, SMMU_SIZE_PT,
+ DMA_TO_DEVICE);
if (dma_mapping_error(smmu->dev, dma)) {
- __iommu_free_pages(page, 0);
+ iommu_free_pages(pt);
return NULL;
}
if (!smmu_dma_addr_valid(smmu, dma)) {
- dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
- DMA_TO_DEVICE);
- __iommu_free_pages(page, 0);
+ dma_unmap_single(smmu->dev, dma, SMMU_SIZE_PT,
+ DMA_TO_DEVICE);
+ iommu_free_pages(pt);
return NULL;
}
- as->pts[pde] = page;
+ as->pts[pde] = pt;
tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
SMMU_PDE_NEXT));
*dmap = dma;
} else {
- u32 *pd = page_address(as->pd);
-
- *dmap = smmu_pde_to_dma(smmu, pd[pde]);
+ *dmap = smmu_pde_to_dma(smmu, as->pd->val[pde]);
}
return tegra_smmu_pte_offset(as->pts[pde], iova);
@@ -637,7 +642,7 @@ static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{
unsigned int pde = iova_pd_index(iova);
- struct page *page = as->pts[pde];
+ struct tegra_pt *pt = as->pts[pde];
/*
* When no entries in this page table are used anymore, return the
@@ -645,13 +650,13 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
*/
if (--as->count[pde] == 0) {
struct tegra_smmu *smmu = as->smmu;
- u32 *pd = page_address(as->pd);
- dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);
+ dma_addr_t pte_dma = smmu_pde_to_dma(smmu, as->pd->val[pde]);
tegra_smmu_set_pde(as, iova, 0);
- dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
- __iommu_free_pages(page, 0);
+ dma_unmap_single(smmu->dev, pte_dma, SMMU_SIZE_PT,
+ DMA_TO_DEVICE);
+ iommu_free_pages(pt);
as->pts[pde] = NULL;
}
}
@@ -671,16 +676,16 @@ static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
smmu_flush(smmu);
}
-static struct page *as_get_pde_page(struct tegra_smmu_as *as,
- unsigned long iova, gfp_t gfp,
- unsigned long *flags)
+static struct tegra_pt *as_get_pde_page(struct tegra_smmu_as *as,
+ unsigned long iova, gfp_t gfp,
+ unsigned long *flags)
{
unsigned int pde = iova_pd_index(iova);
- struct page *page = as->pts[pde];
+ struct tegra_pt *pt = as->pts[pde];
/* at first check whether allocation needs to be done at all */
- if (page)
- return page;
+ if (pt)
+ return pt;
/*
* In order to prevent exhaustion of the atomic memory pool, we
@@ -690,7 +695,7 @@ static struct page *as_get_pde_page(struct tegra_smmu_as *as,
if (gfpflags_allow_blocking(gfp))
spin_unlock_irqrestore(&as->lock, *flags);
- page = __iommu_alloc_pages(gfp | __GFP_DMA, 0);
+ pt = iommu_alloc_pages_sz(gfp | __GFP_DMA, SMMU_SIZE_PT);
if (gfpflags_allow_blocking(gfp))
spin_lock_irqsave(&as->lock, *flags);
@@ -701,13 +706,13 @@ static struct page *as_get_pde_page(struct tegra_smmu_as *as,
* if allocation succeeded and the allocation failure isn't fatal.
*/
if (as->pts[pde]) {
- if (page)
- __iommu_free_pages(page, 0);
+ if (pt)
+ iommu_free_pages(pt);
- page = as->pts[pde];
+ pt = as->pts[pde];
}
- return page;
+ return pt;
}
static int
@@ -717,15 +722,15 @@ __tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
{
struct tegra_smmu_as *as = to_smmu_as(domain);
dma_addr_t pte_dma;
- struct page *page;
+ struct tegra_pt *pt;
u32 pte_attrs;
u32 *pte;
- page = as_get_pde_page(as, iova, gfp, flags);
- if (!page)
+ pt = as_get_pde_page(as, iova, gfp, flags);
+ if (!pt)
return -ENOMEM;
- pte = as_get_pte(as, iova, &pte_dma, page);
+ pte = as_get_pte(as, iova, &pte_dma, pt);
if (!pte)
return -ENOMEM;
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index b85ce6310ddb..ecd41fb03e5a 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -48,6 +48,7 @@ struct viommu_dev {
u64 pgsize_bitmap;
u32 first_domain;
u32 last_domain;
+ u32 identity_domain_id;
/* Supported MAP flags */
u32 map_flags;
u32 probe_size;
@@ -62,7 +63,6 @@ struct viommu_mapping {
struct viommu_domain {
struct iommu_domain domain;
struct viommu_dev *viommu;
- struct mutex mutex; /* protects viommu pointer */
unsigned int id;
u32 map_flags;
@@ -70,7 +70,6 @@ struct viommu_domain {
struct rb_root_cached mappings;
unsigned long nr_endpoints;
- bool bypass;
};
struct viommu_endpoint {
@@ -97,6 +96,8 @@ struct viommu_event {
};
};
+static struct viommu_domain viommu_identity_domain;
+
#define to_viommu_domain(domain) \
container_of(domain, struct viommu_domain, domain)
@@ -305,6 +306,22 @@ out_unlock:
return ret;
}
+static int viommu_send_attach_req(struct viommu_dev *viommu, struct device *dev,
+ struct virtio_iommu_req_attach *req)
+{
+ int ret;
+ unsigned int i;
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+
+ for (i = 0; i < fwspec->num_ids; i++) {
+ req->endpoint = cpu_to_le32(fwspec->ids[i]);
+ ret = viommu_send_req_sync(viommu, req, sizeof(*req));
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
/*
* viommu_add_mapping - add a mapping to the internal tree
*
@@ -637,71 +654,45 @@ static void viommu_event_handler(struct virtqueue *vq)
/* IOMMU API */
-static struct iommu_domain *viommu_domain_alloc(unsigned type)
+static struct iommu_domain *viommu_domain_alloc_paging(struct device *dev)
{
+ struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
+ struct viommu_dev *viommu = vdev->viommu;
+ unsigned long viommu_page_size;
struct viommu_domain *vdomain;
-
- if (type != IOMMU_DOMAIN_UNMANAGED &&
- type != IOMMU_DOMAIN_DMA &&
- type != IOMMU_DOMAIN_IDENTITY)
- return NULL;
-
- vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
- if (!vdomain)
- return NULL;
-
- mutex_init(&vdomain->mutex);
- spin_lock_init(&vdomain->mappings_lock);
- vdomain->mappings = RB_ROOT_CACHED;
-
- return &vdomain->domain;
-}
-
-static int viommu_domain_finalise(struct viommu_endpoint *vdev,
- struct iommu_domain *domain)
-{
int ret;
- unsigned long viommu_page_size;
- struct viommu_dev *viommu = vdev->viommu;
- struct viommu_domain *vdomain = to_viommu_domain(domain);
viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap);
if (viommu_page_size > PAGE_SIZE) {
dev_err(vdev->dev,
"granule 0x%lx larger than system page size 0x%lx\n",
viommu_page_size, PAGE_SIZE);
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
}
- ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
- viommu->last_domain, GFP_KERNEL);
- if (ret < 0)
- return ret;
+ vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
+ if (!vdomain)
+ return ERR_PTR(-ENOMEM);
- vdomain->id = (unsigned int)ret;
+ spin_lock_init(&vdomain->mappings_lock);
+ vdomain->mappings = RB_ROOT_CACHED;
- domain->pgsize_bitmap = viommu->pgsize_bitmap;
- domain->geometry = viommu->geometry;
+ ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
+ viommu->last_domain, GFP_KERNEL);
+ if (ret < 0) {
+ kfree(vdomain);
+ return ERR_PTR(ret);
+ }
- vdomain->map_flags = viommu->map_flags;
- vdomain->viommu = viommu;
+ vdomain->id = (unsigned int)ret;
- if (domain->type == IOMMU_DOMAIN_IDENTITY) {
- if (virtio_has_feature(viommu->vdev,
- VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
- vdomain->bypass = true;
- return 0;
- }
+ vdomain->domain.pgsize_bitmap = viommu->pgsize_bitmap;
+ vdomain->domain.geometry = viommu->geometry;
- ret = viommu_domain_map_identity(vdev, vdomain);
- if (ret) {
- ida_free(&viommu->domain_ids, vdomain->id);
- vdomain->viommu = NULL;
- return ret;
- }
- }
+ vdomain->map_flags = viommu->map_flags;
+ vdomain->viommu = viommu;
- return 0;
+ return &vdomain->domain;
}
static void viommu_domain_free(struct iommu_domain *domain)
@@ -717,29 +708,37 @@ static void viommu_domain_free(struct iommu_domain *domain)
kfree(vdomain);
}
+static struct iommu_domain *viommu_domain_alloc_identity(struct device *dev)
+{
+ struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
+ struct iommu_domain *domain;
+ int ret;
+
+ if (virtio_has_feature(vdev->viommu->vdev,
+ VIRTIO_IOMMU_F_BYPASS_CONFIG))
+ return &viommu_identity_domain.domain;
+
+ domain = viommu_domain_alloc_paging(dev);
+ if (IS_ERR(domain))
+ return domain;
+
+ ret = viommu_domain_map_identity(vdev, to_viommu_domain(domain));
+ if (ret) {
+ viommu_domain_free(domain);
+ return ERR_PTR(ret);
+ }
+ return domain;
+}
+
static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
- int i;
int ret = 0;
struct virtio_iommu_req_attach req;
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
struct viommu_domain *vdomain = to_viommu_domain(domain);
- mutex_lock(&vdomain->mutex);
- if (!vdomain->viommu) {
- /*
- * Properly initialize the domain now that we know which viommu
- * owns it.
- */
- ret = viommu_domain_finalise(vdev, domain);
- } else if (vdomain->viommu != vdev->viommu) {
- ret = -EINVAL;
- }
- mutex_unlock(&vdomain->mutex);
-
- if (ret)
- return ret;
+ if (vdomain->viommu != vdev->viommu)
+ return -EINVAL;
/*
* In the virtio-iommu device, when attaching the endpoint to a new
@@ -761,16 +760,9 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
.domain = cpu_to_le32(vdomain->id),
};
- if (vdomain->bypass)
- req.flags |= cpu_to_le32(VIRTIO_IOMMU_ATTACH_F_BYPASS);
-
- for (i = 0; i < fwspec->num_ids; i++) {
- req.endpoint = cpu_to_le32(fwspec->ids[i]);
-
- ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req));
- if (ret)
- return ret;
- }
+ ret = viommu_send_attach_req(vdomain->viommu, dev, &req);
+ if (ret)
+ return ret;
if (!vdomain->nr_endpoints) {
/*
@@ -788,6 +780,40 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
return 0;
}
+static int viommu_attach_identity_domain(struct iommu_domain *domain,
+ struct device *dev)
+{
+ int ret = 0;
+ struct virtio_iommu_req_attach req;
+ struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
+ struct viommu_domain *vdomain = to_viommu_domain(domain);
+
+ req = (struct virtio_iommu_req_attach) {
+ .head.type = VIRTIO_IOMMU_T_ATTACH,
+ .domain = cpu_to_le32(vdev->viommu->identity_domain_id),
+ .flags = cpu_to_le32(VIRTIO_IOMMU_ATTACH_F_BYPASS),
+ };
+
+ ret = viommu_send_attach_req(vdev->viommu, dev, &req);
+ if (ret)
+ return ret;
+
+ if (vdev->vdomain)
+ vdev->vdomain->nr_endpoints--;
+ vdomain->nr_endpoints++;
+ vdev->vdomain = vdomain;
+ return 0;
+}
+
+static struct viommu_domain viommu_identity_domain = {
+ .domain = {
+ .type = IOMMU_DOMAIN_IDENTITY,
+ .ops = &(const struct iommu_domain_ops) {
+ .attach_dev = viommu_attach_identity_domain,
+ },
+ },
+};
+
static void viommu_detach_dev(struct viommu_endpoint *vdev)
{
int i;
@@ -1062,7 +1088,8 @@ static bool viommu_capable(struct device *dev, enum iommu_cap cap)
static struct iommu_ops viommu_ops = {
.capable = viommu_capable,
- .domain_alloc = viommu_domain_alloc,
+ .domain_alloc_identity = viommu_domain_alloc_identity,
+ .domain_alloc_paging = viommu_domain_alloc_paging,
.probe_device = viommu_probe_device,
.release_device = viommu_release_device,
.device_group = viommu_device_group,
@@ -1184,6 +1211,12 @@ static int viommu_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO))
viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO;
+ /* Reserve an ID to use as the bypass domain */
+ if (virtio_has_feature(viommu->vdev, VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
+ viommu->identity_domain_id = viommu->first_domain;
+ viommu->first_domain++;
+ }
+
viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;
virtio_device_ready(vdev);
diff --git a/drivers/media/i2c/ds90ub913.c b/drivers/media/i2c/ds90ub913.c
index b1e67e514c6a..6d3f8617ef13 100644
--- a/drivers/media/i2c/ds90ub913.c
+++ b/drivers/media/i2c/ds90ub913.c
@@ -707,6 +707,7 @@ static int ub913_i2c_master_init(struct ub913_data *priv)
static int ub913_add_i2c_adapter(struct ub913_data *priv)
{
struct device *dev = &priv->client->dev;
+ struct i2c_atr_adap_desc desc = { };
struct fwnode_handle *i2c_handle;
int ret;
@@ -714,8 +715,12 @@ static int ub913_add_i2c_adapter(struct ub913_data *priv)
if (!i2c_handle)
return 0;
- ret = i2c_atr_add_adapter(priv->plat_data->atr, priv->plat_data->port,
- dev, i2c_handle);
+ desc.chan_id = priv->plat_data->port;
+ desc.parent = dev;
+ desc.bus_handle = i2c_handle;
+ desc.num_aliases = 0;
+
+ ret = i2c_atr_add_adapter(priv->plat_data->atr, &desc);
fwnode_handle_put(i2c_handle);
diff --git a/drivers/media/i2c/ds90ub953.c b/drivers/media/i2c/ds90ub953.c
index 89e3132e81c5..59bd92388845 100644
--- a/drivers/media/i2c/ds90ub953.c
+++ b/drivers/media/i2c/ds90ub953.c
@@ -1102,6 +1102,7 @@ static int ub953_register_clkout(struct ub953_data *priv)
static int ub953_add_i2c_adapter(struct ub953_data *priv)
{
struct device *dev = &priv->client->dev;
+ struct i2c_atr_adap_desc desc = { };
struct fwnode_handle *i2c_handle;
int ret;
@@ -1109,8 +1110,12 @@ static int ub953_add_i2c_adapter(struct ub953_data *priv)
if (!i2c_handle)
return 0;
- ret = i2c_atr_add_adapter(priv->plat_data->atr, priv->plat_data->port,
- dev, i2c_handle);
+ desc.chan_id = priv->plat_data->port;
+ desc.parent = dev;
+ desc.bus_handle = i2c_handle;
+ desc.num_aliases = 0;
+
+ ret = i2c_atr_add_adapter(priv->plat_data->atr, &desc);
fwnode_handle_put(i2c_handle);
diff --git a/drivers/media/i2c/ds90ub960.c b/drivers/media/i2c/ds90ub960.c
index ed9ace1a5476..082fc62b0f5b 100644
--- a/drivers/media/i2c/ds90ub960.c
+++ b/drivers/media/i2c/ds90ub960.c
@@ -27,6 +27,7 @@
*/
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/fwnode.h>
@@ -521,7 +522,9 @@ struct ub960_rxport {
};
} eq;
- const struct i2c_client *aliased_clients[UB960_MAX_PORT_ALIASES];
+ /* lock for aliased_addrs and associated registers */
+ struct mutex aliased_addrs_lock;
+ u16 aliased_addrs[UB960_MAX_PORT_ALIASES];
};
struct ub960_asd {
@@ -1264,8 +1267,8 @@ static int ub960_reset(struct ub960_data *priv, bool reset_regs)
* I2C-ATR (address translator)
*/
-static int ub960_atr_attach_client(struct i2c_atr *atr, u32 chan_id,
- const struct i2c_client *client, u16 alias)
+static int ub960_atr_attach_addr(struct i2c_atr *atr, u32 chan_id,
+ u16 addr, u16 alias)
{
struct ub960_data *priv = i2c_atr_get_driver_data(atr);
struct ub960_rxport *rxport = priv->rxports[chan_id];
@@ -1273,20 +1276,22 @@ static int ub960_atr_attach_client(struct i2c_atr *atr, u32 chan_id,
unsigned int reg_idx;
int ret = 0;
- for (reg_idx = 0; reg_idx < ARRAY_SIZE(rxport->aliased_clients); reg_idx++) {
- if (!rxport->aliased_clients[reg_idx])
+ guard(mutex)(&rxport->aliased_addrs_lock);
+
+ for (reg_idx = 0; reg_idx < ARRAY_SIZE(rxport->aliased_addrs); reg_idx++) {
+ if (!rxport->aliased_addrs[reg_idx])
break;
}
- if (reg_idx == ARRAY_SIZE(rxport->aliased_clients)) {
+ if (reg_idx == ARRAY_SIZE(rxport->aliased_addrs)) {
dev_err(dev, "rx%u: alias pool exhausted\n", rxport->nport);
return -EADDRNOTAVAIL;
}
- rxport->aliased_clients[reg_idx] = client;
+ rxport->aliased_addrs[reg_idx] = addr;
ub960_rxport_write(priv, chan_id, UB960_RR_SLAVE_ID(reg_idx),
- client->addr << 1, &ret);
+ addr << 1, &ret);
ub960_rxport_write(priv, chan_id, UB960_RR_SLAVE_ALIAS(reg_idx),
alias << 1, &ret);
@@ -1294,13 +1299,13 @@ static int ub960_atr_attach_client(struct i2c_atr *atr, u32 chan_id,
return ret;
dev_dbg(dev, "rx%u: client 0x%02x assigned alias 0x%02x at slot %u\n",
- rxport->nport, client->addr, alias, reg_idx);
+ rxport->nport, addr, alias, reg_idx);
return 0;
}
-static void ub960_atr_detach_client(struct i2c_atr *atr, u32 chan_id,
- const struct i2c_client *client)
+static void ub960_atr_detach_addr(struct i2c_atr *atr, u32 chan_id,
+ u16 addr)
{
struct ub960_data *priv = i2c_atr_get_driver_data(atr);
struct ub960_rxport *rxport = priv->rxports[chan_id];
@@ -1308,34 +1313,36 @@ static void ub960_atr_detach_client(struct i2c_atr *atr, u32 chan_id,
unsigned int reg_idx;
int ret;
- for (reg_idx = 0; reg_idx < ARRAY_SIZE(rxport->aliased_clients); reg_idx++) {
- if (rxport->aliased_clients[reg_idx] == client)
+ guard(mutex)(&rxport->aliased_addrs_lock);
+
+ for (reg_idx = 0; reg_idx < ARRAY_SIZE(rxport->aliased_addrs); reg_idx++) {
+ if (rxport->aliased_addrs[reg_idx] == addr)
break;
}
- if (reg_idx == ARRAY_SIZE(rxport->aliased_clients)) {
+ if (reg_idx == ARRAY_SIZE(rxport->aliased_addrs)) {
dev_err(dev, "rx%u: client 0x%02x is not mapped!\n",
- rxport->nport, client->addr);
+ rxport->nport, addr);
return;
}
- rxport->aliased_clients[reg_idx] = NULL;
+ rxport->aliased_addrs[reg_idx] = 0;
ret = ub960_rxport_write(priv, chan_id, UB960_RR_SLAVE_ALIAS(reg_idx),
0, NULL);
if (ret) {
dev_err(dev, "rx%u: unable to fully unmap client 0x%02x: %d\n",
- rxport->nport, client->addr, ret);
+ rxport->nport, addr, ret);
return;
}
dev_dbg(dev, "rx%u: client 0x%02x released at slot %u\n", rxport->nport,
- client->addr, reg_idx);
+ addr, reg_idx);
}
static const struct i2c_atr_ops ub960_atr_ops = {
- .attach_client = ub960_atr_attach_client,
- .detach_client = ub960_atr_detach_client,
+ .attach_addr = ub960_atr_attach_addr,
+ .detach_addr = ub960_atr_detach_addr,
};
static int ub960_init_atr(struct ub960_data *priv)
@@ -1344,7 +1351,7 @@ static int ub960_init_atr(struct ub960_data *priv)
struct i2c_adapter *parent_adap = priv->client->adapter;
priv->atr = i2c_atr_new(parent_adap, dev, &ub960_atr_ops,
- priv->hw_data->num_rxports);
+ priv->hw_data->num_rxports, 0);
if (IS_ERR(priv->atr))
return PTR_ERR(priv->atr);
@@ -2173,7 +2180,6 @@ static int ub960_rxport_add_serializer(struct ub960_data *priv, u8 nport)
struct device *dev = &priv->client->dev;
struct ds90ub9xx_platform_data *ser_pdata = &rxport->ser.pdata;
struct i2c_board_info ser_info = {
- .of_node = to_of_node(rxport->ser.fwnode),
.fwnode = rxport->ser.fwnode,
.platform_data = ser_pdata,
};
@@ -4374,6 +4380,8 @@ static void ub960_rxport_free_ports(struct ub960_data *priv)
fwnode_handle_put(it.rxport->source.ep_fwnode);
fwnode_handle_put(it.rxport->ser.fwnode);
+ mutex_destroy(&it.rxport->aliased_addrs_lock);
+
kfree(it.rxport);
priv->rxports[it.nport] = NULL;
}
@@ -4602,6 +4610,8 @@ static int ub960_parse_dt_rxport(struct ub960_data *priv, unsigned int nport,
if (ret)
goto err_put_remote_fwnode;
+ mutex_init(&rxport->aliased_addrs_lock);
+
return 0;
err_put_remote_fwnode:
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 6b37d61150ee..c161546d728f 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -114,6 +114,18 @@ config RPMB
If unsure, select N.
+config TI_FPC202
+ tristate "TI FPC202 Dual Port Controller"
+ depends on I2C
+ select GPIOLIB
+ select I2C_ATR
+ help
+ If you say yes here you get support for the Texas Instruments FPC202
+ Dual Port Controller.
+
+ This driver can also be built as a module. If so, the module will be
+ called ti_fpc202.
+
config TIFM_CORE
tristate "TI Flash Media interface support"
depends on PCI
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index d6c917229c45..054cee9b08a4 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o
obj-$(CONFIG_DUMMY_IRQ) += dummy-irq.o
obj-$(CONFIG_ICS932S401) += ics932s401.o
obj-$(CONFIG_LKDTM) += lkdtm/
+obj-$(CONFIG_TI_FPC202) += ti_fpc202.o
obj-$(CONFIG_TIFM_CORE) += tifm_core.o
obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o
obj-$(CONFIG_PHANTOM) += phantom.o
diff --git a/drivers/misc/ti_fpc202.c b/drivers/misc/ti_fpc202.c
new file mode 100644
index 000000000000..f7cde245ac95
--- /dev/null
+++ b/drivers/misc/ti_fpc202.c
@@ -0,0 +1,438 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ti_fpc202.c - FPC202 Dual Port Controller driver
+ *
+ * Copyright (C) 2024 Bootlin
+ *
+ */
+
+#include <linux/cleanup.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/i2c-atr.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
+
+#define FPC202_NUM_PORTS 2
+#define FPC202_ALIASES_PER_PORT 2
+
+/*
+ * GPIO: port mapping
+ *
+ * 0: P0_S0_IN_A
+ * 1: P0_S1_IN_A
+ * 2: P1_S0_IN_A
+ * 3: P1_S1_IN_A
+ * 4: P0_S0_IN_B
+ * ...
+ * 8: P0_S0_IN_C
+ * ...
+ * 12: P0_S0_OUT_A
+ * ...
+ * 16: P0_S0_OUT_B
+ * ...
+ * 19: P1_S1_OUT_B
+ *
+ */
+
+#define FPC202_GPIO_COUNT 20
+#define FPC202_GPIO_P0_S0_IN_B 4
+#define FPC202_GPIO_P0_S0_OUT_A 12
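The port-mapping comment above implies a regular layout: offsets advance in banks of four (IN_A, IN_B, IN_C, OUT_A, OUT_B) and, within a bank, in the order P0_S0, P0_S1, P1_S0, P1_S1. A minimal decoding sketch under that assumption; fpc202_decode_offset() is a hypothetical helper, not part of the driver:

	/* Illustrative only: split a GPIO offset into bank/port/signal. */
	static void fpc202_decode_offset(unsigned int offset, unsigned int *bank,
					 unsigned int *port, unsigned int *sig)
	{
		*bank = offset / 4;       /* 0: IN_A, 1: IN_B, 2: IN_C, 3: OUT_A, 4: OUT_B */
		*port = (offset % 4) / 2; /* P0 or P1 */
		*sig  = offset % 2;       /* S0 or S1 */
	}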
+
+#define FPC202_REG_IN_A_INT 0x6
+#define FPC202_REG_IN_C_IN_B 0x7
+#define FPC202_REG_OUT_A_OUT_B 0x8
+
+#define FPC202_REG_OUT_A_OUT_B_VAL 0xa
+
+#define FPC202_REG_MOD_DEV(port, dev) (0xb4 + ((port) * 4) + (dev))
+#define FPC202_REG_AUX_DEV(port, dev) (0xb6 + ((port) * 4) + (dev))
+
+/*
+ * The FPC202 doesn't support turning off address translation on a single port.
+ * So just set an invalid I2C address as the translation target when no client
+ * address is attached.
+ */
+#define FPC202_REG_DEV_INVALID 0
+
+/* Even aliases are assigned to device 0 and odd aliases to device 1 */
+#define fpc202_dev_num_from_alias(alias) ((alias) % 2)
+
+struct fpc202_priv {
+ struct i2c_client *client;
+ struct i2c_atr *atr;
+ struct gpio_desc *en_gpio;
+ struct gpio_chip gpio;
+
+ /* Lock REG_MOD/AUX_DEV and addr_caches during attach/detach */
+ struct mutex reg_dev_lock;
+
+ /* Cached device addresses for both ports and their devices */
+ u8 addr_caches[2][2];
+
+ /* Keep track of which ports were probed */
+ DECLARE_BITMAP(probed_ports, FPC202_NUM_PORTS);
+};
+
+static void fpc202_fill_alias_table(struct i2c_client *client, u16 *aliases, int port_id)
+{
+ u16 first_alias;
+ int i;
+
+ /*
+ * There is a predefined list of aliases for each FPC202 I2C
+ * self-address. This allows daisy-chained FPC202 units to
+ * automatically take on different sets of aliases.
+ * Each port of an FPC202 unit is assigned two aliases from this list.
+ */
+ first_alias = 0x10 + 4 * port_id + 8 * ((u16)client->addr - 2);
+
+ for (i = 0; i < FPC202_ALIASES_PER_PORT; i++)
+ aliases[i] = first_alias + i;
+}
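Worked example of the alias formula above, using a hypothetical self-address: with the FPC202 itself at I2C address 0x02, port 0 gets first_alias = 0x10 + 4*0 + 8*(0x02 - 2) = 0x10, i.e. aliases 0x10 and 0x11, while port 1 gets 0x14 and 0x15; a daisy-chained unit at address 0x03 would start at 0x18.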
+
+static int fpc202_gpio_get_dir(int offset)
+{
+ return offset < FPC202_GPIO_P0_S0_OUT_A ? GPIO_LINE_DIRECTION_IN : GPIO_LINE_DIRECTION_OUT;
+}
+
+static int fpc202_read(struct fpc202_priv *priv, u8 reg)
+{
+ int val;
+
+ val = i2c_smbus_read_byte_data(priv->client, reg);
+ return val;
+}
+
+static int fpc202_write(struct fpc202_priv *priv, u8 reg, u8 value)
+{
+ return i2c_smbus_write_byte_data(priv->client, reg, value);
+}
+
+static void fpc202_set_enable(struct fpc202_priv *priv, int enable)
+{
+ if (!priv->en_gpio)
+ return;
+
+ gpiod_set_value(priv->en_gpio, enable);
+}
+
+static void fpc202_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct fpc202_priv *priv = gpiochip_get_data(chip);
+ int ret;
+ u8 val;
+
+ if (fpc202_gpio_get_dir(offset) == GPIO_LINE_DIRECTION_IN)
+ return;
+
+ ret = fpc202_read(priv, FPC202_REG_OUT_A_OUT_B_VAL);
+ if (ret < 0) {
+ dev_err(&priv->client->dev, "Failed to set GPIO %d value! err %d\n", offset, ret);
+ return;
+ }
+
+ val = (u8)ret;
+
+ if (value)
+ val |= BIT(offset - FPC202_GPIO_P0_S0_OUT_A);
+ else
+ val &= ~BIT(offset - FPC202_GPIO_P0_S0_OUT_A);
+
+ fpc202_write(priv, FPC202_REG_OUT_A_OUT_B_VAL, val);
+}
+
+static int fpc202_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct fpc202_priv *priv = gpiochip_get_data(chip);
+ u8 reg, bit;
+ int ret;
+
+ if (offset < FPC202_GPIO_P0_S0_IN_B) {
+ reg = FPC202_REG_IN_A_INT;
+ bit = BIT(4 + offset);
+ } else if (offset < FPC202_GPIO_P0_S0_OUT_A) {
+ reg = FPC202_REG_IN_C_IN_B;
+ bit = BIT(offset - FPC202_GPIO_P0_S0_IN_B);
+ } else {
+ reg = FPC202_REG_OUT_A_OUT_B_VAL;
+ bit = BIT(offset - FPC202_GPIO_P0_S0_OUT_A);
+ }
+
+ ret = fpc202_read(priv, reg);
+ if (ret < 0)
+ return ret;
+
+ return !!(((u8)ret) & bit);
+}
+
+static int fpc202_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
+{
+ if (fpc202_gpio_get_dir(offset) == GPIO_LINE_DIRECTION_OUT)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int fpc202_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct fpc202_priv *priv = gpiochip_get_data(chip);
+ int ret;
+ u8 val;
+
+ if (fpc202_gpio_get_dir(offset) == GPIO_LINE_DIRECTION_IN)
+ return -EINVAL;
+
+ fpc202_gpio_set(chip, offset, value);
+
+ ret = fpc202_read(priv, FPC202_REG_OUT_A_OUT_B);
+ if (ret < 0)
+ return ret;
+
+ val = (u8)ret | BIT(offset - FPC202_GPIO_P0_S0_OUT_A);
+
+ return fpc202_write(priv, FPC202_REG_OUT_A_OUT_B, val);
+}
+
+/*
+ * Set the translation table entry associated with a port and device number.
+ *
+ * Each downstream port of the FPC202 has two fixed aliases corresponding to
+ * device numbers 0 and 1. If one of these aliases is found in an incoming I2C
+ * transfer, it will be translated to the address given by the corresponding
+ * translation table entry.
+ */
+static int fpc202_write_dev_addr(struct fpc202_priv *priv, u32 port_id, int dev_num, u16 addr)
+{
+ int ret, reg_mod, reg_aux;
+ u8 val;
+
+ guard(mutex)(&priv->reg_dev_lock);
+
+ reg_mod = FPC202_REG_MOD_DEV(port_id, dev_num);
+ reg_aux = FPC202_REG_AUX_DEV(port_id, dev_num);
+ val = addr & 0x7f;
+
+ ret = fpc202_write(priv, reg_mod, val);
+ if (ret)
+ return ret;
+
+ /*
+ * The FPC202 datasheet is unclear about the role of the AUX registers.
+ * Empirically, writing to them as well seems to be necessary for
+ * address translation to function properly.
+ */
+ ret = fpc202_write(priv, reg_aux, val);
+
+ priv->addr_caches[port_id][dev_num] = val;
+
+ return ret;
+}
+
+static int fpc202_attach_addr(struct i2c_atr *atr, u32 chan_id,
+ u16 addr, u16 alias)
+{
+ struct fpc202_priv *priv = i2c_atr_get_driver_data(atr);
+
+ dev_dbg(&priv->client->dev, "attaching address 0x%02x to alias 0x%02x\n", addr, alias);
+
+ return fpc202_write_dev_addr(priv, chan_id, fpc202_dev_num_from_alias(alias), addr);
+}
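Putting the pieces together with hypothetical addresses: attaching a remote device at 0x48 on port 1 under alias 0x15 resolves to device number 1 (odd alias), so the attach path above amounts to:

	/* Sketch of the register writes performed for this example. */
	fpc202_write(priv, 0xb9, 0x48);	/* FPC202_REG_MOD_DEV(1, 1) */
	fpc202_write(priv, 0xbb, 0x48);	/* FPC202_REG_AUX_DEV(1, 1) */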
+
+static void fpc202_detach_addr(struct i2c_atr *atr, u32 chan_id,
+ u16 addr)
+{
+ struct fpc202_priv *priv = i2c_atr_get_driver_data(atr);
+ int dev_num, reg_mod, val;
+
+ for (dev_num = 0; dev_num < 2; dev_num++) {
+ reg_mod = FPC202_REG_MOD_DEV(chan_id, dev_num);
+
+ mutex_lock(&priv->reg_dev_lock);
+
+ val = priv->addr_caches[chan_id][dev_num];
+
+ mutex_unlock(&priv->reg_dev_lock);
+
+ if (val < 0) {
+ dev_err(&priv->client->dev, "failed to read register 0x%x while detaching address 0x%02x\n",
+ reg_mod, addr);
+ return;
+ }
+
+ if (val == (addr & 0x7f)) {
+ fpc202_write_dev_addr(priv, chan_id, dev_num, FPC202_REG_DEV_INVALID);
+ return;
+ }
+ }
+}
+
+static const struct i2c_atr_ops fpc202_atr_ops = {
+ .attach_addr = fpc202_attach_addr,
+ .detach_addr = fpc202_detach_addr,
+};
+
+static int fpc202_probe_port(struct fpc202_priv *priv, struct device_node *i2c_handle, int port_id)
+{
+ u16 aliases[FPC202_ALIASES_PER_PORT] = { };
+ struct device *dev = &priv->client->dev;
+ struct i2c_atr_adap_desc desc = { };
+ int ret = 0;
+
+ desc.chan_id = port_id;
+ desc.parent = dev;
+ desc.bus_handle = of_node_to_fwnode(i2c_handle);
+ desc.num_aliases = FPC202_ALIASES_PER_PORT;
+
+ fpc202_fill_alias_table(priv->client, aliases, port_id);
+ desc.aliases = aliases;
+
+ ret = i2c_atr_add_adapter(priv->atr, &desc);
+ if (ret)
+ return ret;
+
+ set_bit(port_id, priv->probed_ports);
+
+ ret = fpc202_write_dev_addr(priv, port_id, 0, FPC202_REG_DEV_INVALID);
+ if (ret)
+ return ret;
+
+ return fpc202_write_dev_addr(priv, port_id, 1, FPC202_REG_DEV_INVALID);
+}
+
+static void fpc202_remove_port(struct fpc202_priv *priv, int port_id)
+{
+ i2c_atr_del_adapter(priv->atr, port_id);
+ clear_bit(port_id, priv->probed_ports);
+}
+
+static int fpc202_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct device_node *i2c_handle;
+ struct fpc202_priv *priv;
+ int ret, port_id;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_init(&priv->reg_dev_lock);
+
+ priv->client = client;
+ i2c_set_clientdata(client, priv);
+
+ priv->en_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->en_gpio)) {
+ ret = PTR_ERR(priv->en_gpio);
+ dev_err(dev, "failed to fetch enable GPIO! err %d\n", ret);
+ goto destroy_mutex;
+ }
+
+ priv->gpio.label = "gpio-fpc202";
+ priv->gpio.base = -1;
+ priv->gpio.direction_input = fpc202_gpio_direction_input;
+ priv->gpio.direction_output = fpc202_gpio_direction_output;
+ priv->gpio.set = fpc202_gpio_set;
+ priv->gpio.get = fpc202_gpio_get;
+ priv->gpio.ngpio = FPC202_GPIO_COUNT;
+ priv->gpio.parent = dev;
+ priv->gpio.owner = THIS_MODULE;
+
+ ret = gpiochip_add_data(&priv->gpio, priv);
+ if (ret) {
+ priv->gpio.parent = NULL;
+ dev_err(dev, "failed to add gpiochip err %d\n", ret);
+ goto disable_gpio;
+ }
+
+ priv->atr = i2c_atr_new(client->adapter, dev, &fpc202_atr_ops, 2, 0);
+ if (IS_ERR(priv->atr)) {
+ ret = PTR_ERR(priv->atr);
+ dev_err(dev, "failed to create i2c atr err %d\n", ret);
+ goto disable_gpio;
+ }
+
+ i2c_atr_set_driver_data(priv->atr, priv);
+
+ bitmap_zero(priv->probed_ports, FPC202_NUM_PORTS);
+
+ for_each_child_of_node(dev->of_node, i2c_handle) {
+ ret = of_property_read_u32(i2c_handle, "reg", &port_id);
+ if (ret) {
+ if (ret == -EINVAL)
+ continue;
+
+ dev_err(dev, "failed to read 'reg' property of child node, err %d\n", ret);
+ goto unregister_chans;
+ }
+
+ if (port_id >= FPC202_NUM_PORTS) {
+ dev_err(dev, "port ID %d is out of range!\n", port_id);
+ ret = -EINVAL;
+ goto unregister_chans;
+ }
+
+ ret = fpc202_probe_port(priv, i2c_handle, port_id);
+ if (ret) {
+ dev_err(dev, "Failed to probe port %d, err %d\n", port_id, ret);
+ goto unregister_chans;
+ }
+ }
+
+ goto out;
+
+unregister_chans:
+ for_each_set_bit(port_id, priv->probed_ports, FPC202_NUM_PORTS)
+ fpc202_remove_port(priv, port_id);
+
+ i2c_atr_delete(priv->atr);
+disable_gpio:
+ fpc202_set_enable(priv, 0);
+ gpiochip_remove(&priv->gpio);
+destroy_mutex:
+ mutex_destroy(&priv->reg_dev_lock);
+out:
+ return ret;
+}
+
+static void fpc202_remove(struct i2c_client *client)
+{
+ struct fpc202_priv *priv = i2c_get_clientdata(client);
+ int port_id;
+
+ for_each_set_bit(port_id, priv->probed_ports, FPC202_NUM_PORTS)
+ fpc202_remove_port(priv, port_id);
+
+ mutex_destroy(&priv->reg_dev_lock);
+
+ i2c_atr_delete(priv->atr);
+
+ fpc202_set_enable(priv, 0);
+ gpiochip_remove(&priv->gpio);
+}
+
+static const struct of_device_id fpc202_of_match[] = {
+ { .compatible = "ti,fpc202" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, fpc202_of_match);
+
+static struct i2c_driver fpc202_driver = {
+ .driver = {
+ .name = "fpc202",
+ .of_match_table = fpc202_of_match,
+ },
+ .probe = fpc202_probe,
+ .remove = fpc202_remove,
+};
+
+module_i2c_driver(fpc202_driver);
+
+MODULE_AUTHOR("Romain Gantois <romain.gantois@bootlin.com>");
+MODULE_DESCRIPTION("TI FPC202 Dual Port Controller driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("I2C_ATR");
diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c
index bdc2e6fda782..42e7d2a2a90c 100644
--- a/drivers/misc/uacce/uacce.c
+++ b/drivers/misc/uacce/uacce.c
@@ -465,40 +465,6 @@ static void uacce_release(struct device *dev)
kfree(uacce);
}
-static unsigned int uacce_enable_sva(struct device *parent, unsigned int flags)
-{
- int ret;
-
- if (!(flags & UACCE_DEV_SVA))
- return flags;
-
- flags &= ~UACCE_DEV_SVA;
-
- ret = iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_IOPF);
- if (ret) {
- dev_err(parent, "failed to enable IOPF feature! ret = %pe\n", ERR_PTR(ret));
- return flags;
- }
-
- ret = iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_SVA);
- if (ret) {
- dev_err(parent, "failed to enable SVA feature! ret = %pe\n", ERR_PTR(ret));
- iommu_dev_disable_feature(parent, IOMMU_DEV_FEAT_IOPF);
- return flags;
- }
-
- return flags | UACCE_DEV_SVA;
-}
-
-static void uacce_disable_sva(struct uacce_device *uacce)
-{
- if (!(uacce->flags & UACCE_DEV_SVA))
- return;
-
- iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_SVA);
- iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_IOPF);
-}
-
/**
* uacce_alloc() - alloc an accelerator
* @parent: pointer of uacce parent device
@@ -518,8 +484,6 @@ struct uacce_device *uacce_alloc(struct device *parent,
if (!uacce)
return ERR_PTR(-ENOMEM);
- flags = uacce_enable_sva(parent, flags);
-
uacce->parent = parent;
uacce->flags = flags;
uacce->ops = interface->ops;
@@ -542,7 +506,6 @@ struct uacce_device *uacce_alloc(struct device *parent,
return uacce;
err_with_uacce:
- uacce_disable_sva(uacce);
kfree(uacce);
return ERR_PTR(ret);
}
@@ -605,9 +568,6 @@ void uacce_remove(struct uacce_device *uacce)
unmap_mapping_range(q->mapping, 0, 0, 1);
}
- /* disable sva now since no opened queues */
- uacce_disable_sva(uacce);
-
if (uacce->cdev)
cdev_device_del(uacce->cdev, &uacce->dev);
xa_erase(&uacce_xa, uacce->dev_id);
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 57bd49eea777..66c0d1ba2a33 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -1882,6 +1882,11 @@ static int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host,
if (IS_ERR_OR_NULL(ice))
return PTR_ERR_OR_ZERO(ice);
+ if (qcom_ice_get_supported_key_type(ice) != BLK_CRYPTO_KEY_TYPE_RAW) {
+ dev_warn(dev, "Wrapped keys not supported. Disabling inline encryption support.\n");
+ return 0;
+ }
+
msm_host->ice = ice;
/* Initialize the blk_crypto_profile */
@@ -1962,16 +1967,7 @@ static int sdhci_msm_ice_keyslot_program(struct blk_crypto_profile *profile,
struct sdhci_msm_host *msm_host =
sdhci_msm_host_from_crypto_profile(profile);
- /* Only AES-256-XTS has been tested so far. */
- if (key->crypto_cfg.crypto_mode != BLK_ENCRYPTION_MODE_AES_256_XTS)
- return -EOPNOTSUPP;
-
- return qcom_ice_program_key(msm_host->ice,
- QCOM_ICE_CRYPTO_ALG_AES_XTS,
- QCOM_ICE_CRYPTO_KEY_SIZE_256,
- key->bytes,
- key->crypto_cfg.data_unit_size / 512,
- slot);
+ return qcom_ice_program_key(msm_host->ice, slot, key);
}
static int sdhci_msm_ice_keyslot_evict(struct blk_crypto_profile *profile,
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 4ffaf7588885..3504507477c6 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -391,6 +391,7 @@ static void mana_gd_process_eqe(struct gdma_queue *eq)
case GDMA_EQE_HWC_INIT_EQ_ID_DB:
case GDMA_EQE_HWC_INIT_DATA:
case GDMA_EQE_HWC_INIT_DONE:
+ case GDMA_EQE_HWC_SOC_SERVICE:
case GDMA_EQE_RNIC_QP_FATAL:
if (!eq->eq.callback)
break;
@@ -964,6 +965,7 @@ int mana_gd_verify_vf_version(struct pci_dev *pdev)
err, resp.hdr.status);
return err ? err : -EPROTO;
}
+ gc->pf_cap_flags1 = resp.pf_cap_flags1;
if (resp.pf_cap_flags1 & GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG) {
err = mana_gd_query_hwc_timeout(pdev, &hwc->hwc_timeout);
if (err) {
@@ -1004,7 +1006,6 @@ int mana_gd_register_device(struct gdma_dev *gd)
return 0;
}
-EXPORT_SYMBOL_NS(mana_gd_register_device, "NET_MANA");
int mana_gd_deregister_device(struct gdma_dev *gd)
{
@@ -1035,7 +1036,6 @@ int mana_gd_deregister_device(struct gdma_dev *gd)
return err;
}
-EXPORT_SYMBOL_NS(mana_gd_deregister_device, "NET_MANA");
u32 mana_gd_wq_avail_space(struct gdma_queue *wq)
{
@@ -1469,10 +1469,14 @@ static int mana_gd_setup(struct pci_dev *pdev)
mana_gd_init_registers(pdev);
mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base);
+ gc->service_wq = alloc_ordered_workqueue("gdma_service_wq", 0);
+ if (!gc->service_wq)
+ return -ENOMEM;
+
err = mana_gd_setup_irqs(pdev);
if (err) {
dev_err(gc->dev, "Failed to setup IRQs: %d\n", err);
- return err;
+ goto free_workqueue;
}
err = mana_hwc_create_channel(gc);
@@ -1498,6 +1502,8 @@ destroy_hwc:
mana_hwc_destroy_channel(gc);
remove_irq:
mana_gd_remove_irqs(pdev);
+free_workqueue:
+ destroy_workqueue(gc->service_wq);
dev_err(&pdev->dev, "%s failed (error %d)\n", __func__, err);
return err;
}
@@ -1509,6 +1515,8 @@ static void mana_gd_cleanup(struct pci_dev *pdev)
mana_hwc_destroy_channel(gc);
mana_gd_remove_irqs(pdev);
+
+ destroy_workqueue(gc->service_wq);
dev_dbg(&pdev->dev, "mana gdma cleanup successful\n");
}
@@ -1578,8 +1586,14 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto cleanup_gd;
+ err = mana_rdma_probe(&gc->mana_ib);
+ if (err)
+ goto cleanup_mana;
+
return 0;
+cleanup_mana:
+ mana_remove(&gc->mana, false);
cleanup_gd:
mana_gd_cleanup(pdev);
unmap_bar:
@@ -1607,6 +1621,7 @@ static void mana_gd_remove(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
+ mana_rdma_remove(&gc->mana_ib);
mana_remove(&gc->mana, false);
mana_gd_cleanup(pdev);
@@ -1630,6 +1645,7 @@ static int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
+ mana_rdma_remove(&gc->mana_ib);
mana_remove(&gc->mana, true);
mana_gd_cleanup(pdev);
@@ -1654,6 +1670,10 @@ static int mana_gd_resume(struct pci_dev *pdev)
if (err)
return err;
+ err = mana_rdma_probe(&gc->mana_ib);
+ if (err)
+ return err;
+
return 0;
}
@@ -1664,6 +1684,7 @@ static void mana_gd_shutdown(struct pci_dev *pdev)
dev_info(&pdev->dev, "Shutdown was called\n");
+ mana_rdma_remove(&gc->mana_ib);
mana_remove(&gc->mana, true);
mana_gd_cleanup(pdev);
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index 1ba49602089b..a8c4d8db75a5 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -112,11 +112,13 @@ out:
static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
struct gdma_event *event)
{
+ union hwc_init_soc_service_type service_data;
struct hw_channel_context *hwc = ctx;
struct gdma_dev *gd = hwc->gdma_dev;
union hwc_init_type_data type_data;
union hwc_init_eq_id_db eq_db;
u32 type, val;
+ int ret;
switch (event->type) {
case GDMA_EQE_HWC_INIT_EQ_ID_DB:
@@ -199,7 +201,24 @@ static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
}
break;
+ case GDMA_EQE_HWC_SOC_SERVICE:
+ service_data.as_uint32 = event->details[0];
+ type = service_data.type;
+ switch (type) {
+ case GDMA_SERVICE_TYPE_RDMA_SUSPEND:
+ case GDMA_SERVICE_TYPE_RDMA_RESUME:
+ ret = mana_rdma_service_event(gd->gdma_context, type);
+ if (ret)
+ dev_err(hwc->dev, "Failed to schedule adev service event: %d\n",
+ ret);
+ break;
+ default:
+ dev_warn(hwc->dev, "Received unknown SOC service type %u\n", type);
+ break;
+ }
+
+ break;
default:
dev_warn(hwc->dev, "Received unknown gdma event %u\n", event->type);
/* Ignore unknown events, which should never happen. */
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 9c58d9e0bbb5..ccd2885c939e 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -2950,7 +2950,7 @@ static void remove_adev(struct gdma_dev *gd)
gd->adev = NULL;
}
-static int add_adev(struct gdma_dev *gd)
+static int add_adev(struct gdma_dev *gd, const char *name)
{
struct auxiliary_device *adev;
struct mana_adev *madev;
@@ -2966,7 +2966,7 @@ static int add_adev(struct gdma_dev *gd)
goto idx_fail;
adev->id = ret;
- adev->name = "rdma";
+ adev->name = name;
adev->dev.parent = gd->gdma_context->dev;
adev->dev.release = adev_release;
madev->mdev = gd;
@@ -2998,6 +2998,70 @@ idx_fail:
return ret;
}
+static void mana_rdma_service_handle(struct work_struct *work)
+{
+ struct mana_service_work *serv_work =
+ container_of(work, struct mana_service_work, work);
+ struct gdma_dev *gd = serv_work->gdma_dev;
+ struct device *dev = gd->gdma_context->dev;
+ int ret;
+
+ if (READ_ONCE(gd->rdma_teardown))
+ goto out;
+
+ switch (serv_work->event) {
+ case GDMA_SERVICE_TYPE_RDMA_SUSPEND:
+ if (!gd->adev || gd->is_suspended)
+ break;
+
+ remove_adev(gd);
+ gd->is_suspended = true;
+ break;
+
+ case GDMA_SERVICE_TYPE_RDMA_RESUME:
+ if (!gd->is_suspended)
+ break;
+
+ ret = add_adev(gd, "rdma");
+ if (ret)
+ dev_err(dev, "Failed to add adev on resume: %d\n", ret);
+ else
+ gd->is_suspended = false;
+ break;
+
+ default:
+ dev_warn(dev, "unknown adev service event %u\n",
+ serv_work->event);
+ break;
+ }
+
+out:
+ kfree(serv_work);
+}
+
+int mana_rdma_service_event(struct gdma_context *gc, enum gdma_service_type event)
+{
+ struct gdma_dev *gd = &gc->mana_ib;
+ struct mana_service_work *serv_work;
+
+ if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) {
+ /* RDMA device is not detected on pci */
+ return 0;
+ }
+
+ serv_work = kzalloc(sizeof(*serv_work), GFP_ATOMIC);
+ if (!serv_work)
+ return -ENOMEM;
+
+ serv_work->event = event;
+ serv_work->gdma_dev = gd;
+
+ INIT_WORK(&serv_work->work, mana_rdma_service_handle);
+ queue_work(gc->service_wq, &serv_work->work);
+
+ return 0;
+}
+
int mana_probe(struct gdma_dev *gd, bool resuming)
{
struct gdma_context *gc = gd->gdma_context;
@@ -3085,7 +3149,7 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
}
}
- err = add_adev(gd);
+ err = add_adev(gd, "eth");
out:
if (err) {
mana_remove(gd, false);
@@ -3159,6 +3223,44 @@ out:
dev_dbg(dev, "%s succeeded\n", __func__);
}
+int mana_rdma_probe(struct gdma_dev *gd)
+{
+ int err = 0;
+
+ if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) {
+ /* RDMA device is not detected on pci */
+ return err;
+ }
+
+ err = mana_gd_register_device(gd);
+ if (err)
+ return err;
+
+ err = add_adev(gd, "rdma");
+ if (err)
+ mana_gd_deregister_device(gd);
+
+ return err;
+}
+
+void mana_rdma_remove(struct gdma_dev *gd)
+{
+ struct gdma_context *gc = gd->gdma_context;
+
+ if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) {
+ /* RDMA device is not detected on pci */
+ return;
+ }
+
+ WRITE_ONCE(gd->rdma_teardown, true);
+ flush_workqueue(gc->service_wq);
+
+ if (gd->adev)
+ remove_adev(gd);
+
+ mana_gd_deregister_device(gd);
+}
+
struct net_device *mana_get_primary_netdev(struct mana_context *ac,
u32 port_index,
netdevice_tracker *tracker)
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 464cc9aca157..33db9104df17 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -103,12 +103,10 @@ config PINCTRL_AS3722
config PINCTRL_AT91
bool "AT91 pinctrl driver"
- depends on OF
- depends on ARCH_AT91
+ depends on (OF && ARCH_AT91) || COMPILE_TEST
select PINMUX
select PINCONF
select GPIOLIB
- select OF_GPIO
select GPIOLIB_IRQCHIP
help
Say Y here to enable the at91 pinctrl driver
diff --git a/drivers/pinctrl/actions/pinctrl-owl.c b/drivers/pinctrl/actions/pinctrl-owl.c
index d49b77dcfcff..86f3d5c69e36 100644
--- a/drivers/pinctrl/actions/pinctrl-owl.c
+++ b/drivers/pinctrl/actions/pinctrl-owl.c
@@ -598,7 +598,7 @@ static int owl_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(val & BIT(offset));
}
-static void owl_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int owl_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct owl_pinctrl *pctrl = gpiochip_get_data(chip);
const struct owl_gpio_port *port;
@@ -607,13 +607,15 @@ static void owl_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
port = owl_gpio_get_port(pctrl, &offset);
if (WARN_ON(port == NULL))
- return;
+ return -ENODEV;
gpio_base = pctrl->base + port->offset;
raw_spin_lock_irqsave(&pctrl->lock, flags);
owl_gpio_update_reg(gpio_base + port->dat, offset, value);
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return 0;
}
static int owl_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
@@ -960,7 +962,7 @@ int owl_pinctrl_probe(struct platform_device *pdev,
pctrl->chip.direction_input = owl_gpio_direction_input;
pctrl->chip.direction_output = owl_gpio_direction_output;
pctrl->chip.get = owl_gpio_get;
- pctrl->chip.set = owl_gpio_set;
+ pctrl->chip.set_rv = owl_gpio_set;
pctrl->chip.request = owl_gpio_request;
pctrl->chip.free = owl_gpio_free;
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index eaeec096bc9a..826827800474 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -356,11 +356,14 @@ static int bcm2835_gpio_get_direction(struct gpio_chip *chip, unsigned int offse
return GPIO_LINE_DIRECTION_IN;
}
-static void bcm2835_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int bcm2835_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct bcm2835_pinctrl *pc = gpiochip_get_data(chip);
bcm2835_gpio_set_bit(pc, value ? GPSET0 : GPCLR0, offset);
+
+ return 0;
}
static int bcm2835_gpio_direction_output(struct gpio_chip *chip,
@@ -394,7 +397,7 @@ static const struct gpio_chip bcm2835_gpio_chip = {
.direction_output = bcm2835_gpio_direction_output,
.get_direction = bcm2835_gpio_get_direction,
.get = bcm2835_gpio_get,
- .set = bcm2835_gpio_set,
+ .set_rv = bcm2835_gpio_set,
.set_config = gpiochip_generic_config,
.base = -1,
.ngpio = BCM2835_NUM_GPIOS,
@@ -411,7 +414,7 @@ static const struct gpio_chip bcm2711_gpio_chip = {
.direction_output = bcm2835_gpio_direction_output,
.get_direction = bcm2835_gpio_get_direction,
.get = bcm2835_gpio_get,
- .set = bcm2835_gpio_set,
+ .set_rv = bcm2835_gpio_set,
.set_config = gpiochip_generic_config,
.base = -1,
.ngpio = BCM2711_NUM_GPIOS,
diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
index c9a3d3aa8c10..1d08b8d4cdd7 100644
--- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
@@ -390,7 +390,7 @@ static int iproc_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio)
return GPIO_LINE_DIRECTION_IN;
}
-static void iproc_gpio_set(struct gpio_chip *gc, unsigned gpio, int val)
+static int iproc_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
struct iproc_gpio *chip = gpiochip_get_data(gc);
unsigned long flags;
@@ -400,6 +400,8 @@ static void iproc_gpio_set(struct gpio_chip *gc, unsigned gpio, int val)
raw_spin_unlock_irqrestore(&chip->lock, flags);
dev_dbg(chip->dev, "gpio:%u set, value:%d\n", gpio, val);
+
+ return 0;
}
static int iproc_gpio_get(struct gpio_chip *gc, unsigned gpio)
@@ -863,7 +865,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
gc->direction_input = iproc_gpio_direction_input;
gc->direction_output = iproc_gpio_direction_output;
gc->get_direction = iproc_gpio_get_direction;
- gc->set = iproc_gpio_set;
+ gc->set_rv = iproc_gpio_set;
gc->get = iproc_gpio_get;
chip->pinmux_is_supported = of_property_read_bool(dev->of_node,
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
index a96be8f244e0..b08f8480ddc6 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
@@ -310,7 +310,7 @@ static int nsp_gpio_get_direction(struct gpio_chip *gc, unsigned gpio)
return !val;
}
-static void nsp_gpio_set(struct gpio_chip *gc, unsigned gpio, int val)
+static int nsp_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
struct nsp_gpio *chip = gpiochip_get_data(gc);
unsigned long flags;
@@ -320,6 +320,8 @@ static void nsp_gpio_set(struct gpio_chip *gc, unsigned gpio, int val)
raw_spin_unlock_irqrestore(&chip->lock, flags);
dev_dbg(chip->dev, "gpio:%u set, value:%d\n", gpio, val);
+
+ return 0;
}
static int nsp_gpio_get(struct gpio_chip *gc, unsigned gpio)
@@ -654,7 +656,7 @@ static int nsp_gpio_probe(struct platform_device *pdev)
gc->direction_input = nsp_gpio_direction_input;
gc->direction_output = nsp_gpio_direction_output;
gc->get_direction = nsp_gpio_get_direction;
- gc->set = nsp_gpio_set;
+ gc->set_rv = nsp_gpio_set;
gc->get = nsp_gpio_get;
/* optional GPIO interrupt support */
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 4bdbf6bb26e2..9046292d1360 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1530,6 +1530,35 @@ void pinctrl_unregister_mappings(const struct pinctrl_map *map)
}
EXPORT_SYMBOL_GPL(pinctrl_unregister_mappings);
+static void devm_pinctrl_unregister_mappings(void *maps)
+{
+ pinctrl_unregister_mappings(maps);
+}
+
+/**
+ * devm_pinctrl_register_mappings() - Resource managed pinctrl_register_mappings()
+ * @dev: device for which mappings are registered
+ * @maps: the pincontrol mappings table to register. Note the pinctrl-core
+ * keeps a reference to the passed in maps, so they should _not_ be
+ * marked with __initdata.
+ * @num_maps: the number of maps in the mapping table
+ *
+ * Returns: 0 on success, or negative errno on failure.
+ */
+int devm_pinctrl_register_mappings(struct device *dev,
+ const struct pinctrl_map *maps,
+ unsigned int num_maps)
+{
+ int ret;
+
+ ret = pinctrl_register_mappings(maps, num_maps);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, devm_pinctrl_unregister_mappings, (void *)maps);
+}
+EXPORT_SYMBOL_GPL(devm_pinctrl_register_mappings);
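A minimal usage sketch for the new devm helper; the device name, controller name, and map entries below are hypothetical. The point is that no explicit pinctrl_unregister_mappings() call is needed on the error or remove paths:

	static const struct pinctrl_map foo_pinctrl_map[] = {
		PIN_MAP_MUX_GROUP_DEFAULT("foo-device", "pinctrl-foo",
					  "i2c0_grp", "i2c0"),
	};

	static int foo_probe(struct platform_device *pdev)
	{
		int ret;

		/* Mappings are unregistered automatically when this driver is unbound. */
		ret = devm_pinctrl_register_mappings(&pdev->dev, foo_pinctrl_map,
						     ARRAY_SIZE(foo_pinctrl_map));
		if (ret)
			return ret;

		return 0;
	}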
+
/**
* pinctrl_force_sleep() - turn a given controller device into sleep state
* @pctldev: pin controller device
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index 4c420b21b804..8d24decd3f07 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -20,7 +20,9 @@ config PINCTRL_IMX_SCMI
config PINCTRL_IMX_SCU
tristate
+ depends on OF
depends on IMX_SCU || COMPILE_TEST
+ default IMX_SCU
select PINCTRL_IMX
config PINCTRL_IMX1_CORE
@@ -159,6 +161,7 @@ config PINCTRL_IMX8MM
tristate "IMX8MM pinctrl driver"
depends on OF
depends on SOC_IMX8M || COMPILE_TEST
+ default SOC_IMX8M
select PINCTRL_IMX
help
Say Y here to enable the imx8mm pinctrl driver
@@ -167,6 +170,7 @@ config PINCTRL_IMX8MN
tristate "IMX8MN pinctrl driver"
depends on OF
depends on SOC_IMX8M || COMPILE_TEST
+ default SOC_IMX8M
select PINCTRL_IMX
help
Say Y here to enable the imx8mn pinctrl driver
@@ -175,6 +179,7 @@ config PINCTRL_IMX8MP
tristate "IMX8MP pinctrl driver"
depends on OF
depends on SOC_IMX8M || COMPILE_TEST
+ default SOC_IMX8M
select PINCTRL_IMX
help
Say Y here to enable the imx8mp pinctrl driver
@@ -183,6 +188,7 @@ config PINCTRL_IMX8MQ
tristate "IMX8MQ pinctrl driver"
depends on OF
depends on SOC_IMX8M || COMPILE_TEST
+ default SOC_IMX8M
select PINCTRL_IMX
help
Say Y here to enable the imx8mq pinctrl driver
@@ -191,6 +197,7 @@ config PINCTRL_IMX8QM
tristate "IMX8QM pinctrl driver"
depends on OF
depends on (IMX_SCU && ARCH_MXC && ARM64) || COMPILE_TEST
+ default ARCH_MXC
select PINCTRL_IMX_SCU
help
Say Y here to enable the imx8qm pinctrl driver
@@ -199,6 +206,7 @@ config PINCTRL_IMX8QXP
tristate "IMX8QXP pinctrl driver"
depends on OF
depends on (IMX_SCU && ARCH_MXC && ARM64) || COMPILE_TEST
+ default ARCH_MXC
select PINCTRL_IMX_SCU
help
Say Y here to enable the imx8qxp pinctrl driver
@@ -207,6 +215,7 @@ config PINCTRL_IMX8DXL
tristate "IMX8DXL pinctrl driver"
depends on OF
depends on (IMX_SCU && ARCH_MXC && ARM64) || COMPILE_TEST
+ default ARCH_MXC
select PINCTRL_IMX_SCU
help
Say Y here to enable the imx8dxl pinctrl driver
@@ -215,6 +224,7 @@ config PINCTRL_IMX8ULP
tristate "IMX8ULP pinctrl driver"
depends on OF
depends on ARCH_MXC || COMPILE_TEST
+ default ARCH_MXC
select PINCTRL_IMX
help
Say Y here to enable the imx8ulp pinctrl driver
@@ -239,6 +249,7 @@ config PINCTRL_IMX93
tristate "IMX93 pinctrl driver"
depends on OF
depends on ARCH_MXC || COMPILE_TEST
+ default SOC_IMX9
select PINCTRL_IMX
help
Say Y here to enable the imx93 pinctrl driver
diff --git a/drivers/pinctrl/freescale/pinctrl-imx-scmi.c b/drivers/pinctrl/freescale/pinctrl-imx-scmi.c
index 8f15c4c4dc44..4e8ab919b334 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx-scmi.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx-scmi.c
@@ -51,6 +51,7 @@ struct scmi_pinctrl_imx {
#define IMX_SCMI_PIN_SIZE 24
#define IMX95_DAISY_OFF 0x408
+#define IMX94_DAISY_OFF 0x608
static int pinctrl_scmi_imx_dt_node_to_map(struct pinctrl_dev *pctldev,
struct device_node *np,
@@ -70,6 +71,8 @@ static int pinctrl_scmi_imx_dt_node_to_map(struct pinctrl_dev *pctldev,
if (!daisy_off) {
if (of_machine_is_compatible("fsl,imx95")) {
daisy_off = IMX95_DAISY_OFF;
+ } else if (of_machine_is_compatible("fsl,imx94")) {
+ daisy_off = IMX94_DAISY_OFF;
} else {
dev_err(pctldev->dev, "platform not support scmi pinctrl\n");
return -EINVAL;
@@ -289,6 +292,7 @@ scmi_pinctrl_imx_get_pins(struct scmi_pinctrl_imx *pmx, struct pinctrl_desc *des
static const char * const scmi_pinctrl_imx_allowlist[] = {
"fsl,imx95",
+ "fsl,imx94",
NULL
};
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
index 58f32818a0e6..2d15af6be276 100644
--- a/drivers/pinctrl/mediatek/Kconfig
+++ b/drivers/pinctrl/mediatek/Kconfig
@@ -181,6 +181,16 @@ config PINCTRL_MT6797
default ARM64 && ARCH_MEDIATEK
select PINCTRL_MTK_PARIS
+config PINCTRL_MT6893
+ bool "MediaTek Dimensity MT6893 pin control"
+ depends on OF
+ depends on ARM64 || COMPILE_TEST
+ default ARM64 && ARCH_MEDIATEK
+ select PINCTRL_MTK_PARIS
+ help
+ Say yes here to support pin controller and gpio driver
+ on the MediaTek Dimensity 1200 MT6893 Smartphone SoC.
+
config PINCTRL_MT7622
bool "MediaTek MT7622 pin control"
depends on OF
@@ -263,6 +273,18 @@ config PINCTRL_MT8195
default ARM64 && ARCH_MEDIATEK
select PINCTRL_MTK_PARIS
+config PINCTRL_MT8196
+ bool "MediaTek MT8196 pin control"
+ depends on OF
+ depends on ARM64 || COMPILE_TEST
+ default ARM64 && ARCH_MEDIATEK
+ select PINCTRL_MTK_PARIS
+ help
+ Say yes here to support the pin controller and GPIO driver
+ on the MediaTek MT8196 SoC.
+ On MediaTek platforms, virtual GPIOs are supported and used to
+ map EINT lines that have no physical GPIO pin.
+
config PINCTRL_MT8365
bool "MediaTek MT8365 pin control"
depends on OF
diff --git a/drivers/pinctrl/mediatek/Makefile b/drivers/pinctrl/mediatek/Makefile
index 721ae83476d0..7518980fba59 100644
--- a/drivers/pinctrl/mediatek/Makefile
+++ b/drivers/pinctrl/mediatek/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_PINCTRL_MT6765) += pinctrl-mt6765.o
obj-$(CONFIG_PINCTRL_MT6779) += pinctrl-mt6779.o
obj-$(CONFIG_PINCTRL_MT6795) += pinctrl-mt6795.o
obj-$(CONFIG_PINCTRL_MT6797) += pinctrl-mt6797.o
+obj-$(CONFIG_PINCTRL_MT6893) += pinctrl-mt6893.o
obj-$(CONFIG_PINCTRL_MT7622) += pinctrl-mt7622.o
obj-$(CONFIG_PINCTRL_MT7623) += pinctrl-mt7623.o
obj-$(CONFIG_PINCTRL_MT7629) += pinctrl-mt7629.o
@@ -36,6 +37,7 @@ obj-$(CONFIG_PINCTRL_MT8186) += pinctrl-mt8186.o
obj-$(CONFIG_PINCTRL_MT8188) += pinctrl-mt8188.o
obj-$(CONFIG_PINCTRL_MT8192) += pinctrl-mt8192.o
obj-$(CONFIG_PINCTRL_MT8195) += pinctrl-mt8195.o
+obj-$(CONFIG_PINCTRL_MT8196) += pinctrl-mt8196.o
obj-$(CONFIG_PINCTRL_MT8365) += pinctrl-mt8365.o
obj-$(CONFIG_PINCTRL_MT8516) += pinctrl-mt8516.o
obj-$(CONFIG_PINCTRL_MT6397) += pinctrl-mt6397.o
diff --git a/drivers/pinctrl/mediatek/mtk-eint.c b/drivers/pinctrl/mediatek/mtk-eint.c
index e20aaba0a33a..d906a5e4101f 100644
--- a/drivers/pinctrl/mediatek/mtk-eint.c
+++ b/drivers/pinctrl/mediatek/mtk-eint.c
@@ -22,7 +22,6 @@
#include <linux/platform_device.h>
#include "mtk-eint.h"
-#include "pinctrl-mtk-common-v2.h"
#define MTK_EINT_EDGE_SENSITIVE 0
#define MTK_EINT_LEVEL_SENSITIVE 1
@@ -505,10 +504,9 @@ int mtk_eint_find_irq(struct mtk_eint *eint, unsigned long eint_n)
}
EXPORT_SYMBOL_GPL(mtk_eint_find_irq);
-int mtk_eint_do_init(struct mtk_eint *eint)
+int mtk_eint_do_init(struct mtk_eint *eint, struct mtk_eint_pin *eint_pin)
{
- unsigned int size, i, port, inst = 0;
- struct mtk_pinctrl *hw = (struct mtk_pinctrl *)eint->pctl;
+ unsigned int size, i, port, virq, inst = 0;
/* If clients don't assign a specific regs, let's use generic one */
if (!eint->regs)
@@ -519,7 +517,15 @@ int mtk_eint_do_init(struct mtk_eint *eint)
if (!eint->base_pin_num)
return -ENOMEM;
- if (eint->nbase == 1) {
+ if (eint_pin) {
+ eint->pins = eint_pin;
+ for (i = 0; i < eint->hw->ap_num; i++) {
+ inst = eint->pins[i].instance;
+ if (inst >= eint->nbase)
+ continue;
+ eint->base_pin_num[inst]++;
+ }
+ } else {
size = eint->hw->ap_num * sizeof(struct mtk_eint_pin);
eint->pins = devm_kmalloc(eint->dev, size, GFP_KERNEL);
if (!eint->pins)
@@ -533,16 +539,6 @@ int mtk_eint_do_init(struct mtk_eint *eint)
}
}
- if (hw && hw->soc && hw->soc->eint_pin) {
- eint->pins = hw->soc->eint_pin;
- for (i = 0; i < eint->hw->ap_num; i++) {
- inst = eint->pins[i].instance;
- if (inst >= eint->nbase)
- continue;
- eint->base_pin_num[inst]++;
- }
- }
-
eint->pin_list = devm_kmalloc(eint->dev, eint->nbase * sizeof(u16 *), GFP_KERNEL);
if (!eint->pin_list)
goto err_pin_list;
@@ -583,7 +579,7 @@ int mtk_eint_do_init(struct mtk_eint *eint)
if (inst >= eint->nbase)
continue;
eint->pin_list[inst][eint->pins[i].index] = i;
- int virq = irq_create_mapping(eint->domain, i);
+ virq = irq_create_mapping(eint->domain, i);
irq_set_chip_and_handler(virq, &mtk_eint_irq_chip,
handle_level_irq);
irq_set_chip_data(virq, eint);
@@ -609,7 +605,7 @@ err_cur_mask:
err_wake_mask:
devm_kfree(eint->dev, eint->pin_list);
err_pin_list:
- if (eint->nbase == 1)
+ if (!eint_pin)
devm_kfree(eint->dev, eint->pins);
err_pins:
devm_kfree(eint->dev, eint->base_pin_num);
diff --git a/drivers/pinctrl/mediatek/mtk-eint.h b/drivers/pinctrl/mediatek/mtk-eint.h
index f7f58cca0d5e..fc31a4c0c77b 100644
--- a/drivers/pinctrl/mediatek/mtk-eint.h
+++ b/drivers/pinctrl/mediatek/mtk-eint.h
@@ -66,7 +66,7 @@ struct mtk_eint_xt {
struct mtk_eint {
struct device *dev;
void __iomem **base;
- u8 nbase;
+ int nbase;
u16 *base_pin_num;
struct irq_domain *domain;
int irq;
@@ -88,7 +88,7 @@ struct mtk_eint {
};
#if IS_ENABLED(CONFIG_EINT_MTK)
-int mtk_eint_do_init(struct mtk_eint *eint);
+int mtk_eint_do_init(struct mtk_eint *eint, struct mtk_eint_pin *eint_pin);
int mtk_eint_do_suspend(struct mtk_eint *eint);
int mtk_eint_do_resume(struct mtk_eint *eint);
int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_n,
@@ -96,7 +96,8 @@ int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_n,
int mtk_eint_find_irq(struct mtk_eint *eint, unsigned long eint_n);
#else
-static inline int mtk_eint_do_init(struct mtk_eint *eint)
+static inline int mtk_eint_do_init(struct mtk_eint *eint,
+ struct mtk_eint_pin *eint_pin)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/pinctrl/mediatek/pinctrl-airoha.c b/drivers/pinctrl/mediatek/pinctrl-airoha.c
index 5d84a778683d..b97b28ebb37a 100644
--- a/drivers/pinctrl/mediatek/pinctrl-airoha.c
+++ b/drivers/pinctrl/mediatek/pinctrl-airoha.c
@@ -2247,15 +2247,16 @@ static int airoha_convert_pin_to_reg_offset(struct pinctrl_dev *pctrl_dev,
}
/* gpio callbacks */
-static void airoha_gpio_set(struct gpio_chip *chip, unsigned int gpio,
- int value)
+static int airoha_gpio_set(struct gpio_chip *chip, unsigned int gpio,
+ int value)
{
struct airoha_pinctrl *pinctrl = gpiochip_get_data(chip);
u32 offset = gpio % AIROHA_PIN_BANK_SIZE;
u8 index = gpio / AIROHA_PIN_BANK_SIZE;
- regmap_update_bits(pinctrl->regmap, pinctrl->gpiochip.data[index],
- BIT(offset), value ? BIT(offset) : 0);
+ return regmap_update_bits(pinctrl->regmap,
+ pinctrl->gpiochip.data[index],
+ BIT(offset), value ? BIT(offset) : 0);
}
static int airoha_gpio_get(struct gpio_chip *chip, unsigned int gpio)
@@ -2280,9 +2281,7 @@ static int airoha_gpio_direction_output(struct gpio_chip *chip,
if (err)
return err;
- airoha_gpio_set(chip, gpio, value);
-
- return 0;
+ return airoha_gpio_set(chip, gpio, value);
}
/* irq callbacks */
@@ -2419,7 +2418,7 @@ static int airoha_pinctrl_add_gpiochip(struct airoha_pinctrl *pinctrl,
gc->free = gpiochip_generic_free;
gc->direction_input = pinctrl_gpio_direction_input;
gc->direction_output = airoha_gpio_direction_output;
- gc->set = airoha_gpio_set;
+ gc->set_rv = airoha_gpio_set;
gc->get = airoha_gpio_get;
gc->base = -1;
gc->ngpio = AIROHA_NUM_PINS;
@@ -2715,9 +2714,7 @@ static int airoha_pinconf_set_pin_value(struct pinctrl_dev *pctrl_dev,
if (pin < 0)
return pin;
- airoha_gpio_set(&pinctrl->gpiochip.chip, pin, value);
-
- return 0;
+ return airoha_gpio_set(&pinctrl->gpiochip.chip, pin, value);
}
static int airoha_pinconf_set(struct pinctrl_dev *pctrl_dev,
diff --git a/drivers/pinctrl/mediatek/pinctrl-moore.c b/drivers/pinctrl/mediatek/pinctrl-moore.c
index aad4891223d3..827d0f191031 100644
--- a/drivers/pinctrl/mediatek/pinctrl-moore.c
+++ b/drivers/pinctrl/mediatek/pinctrl-moore.c
@@ -496,24 +496,26 @@ static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio)
return !!value;
}
-static void mtk_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
+static int mtk_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
{
struct mtk_pinctrl *hw = gpiochip_get_data(chip);
const struct mtk_pin_desc *desc;
desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio];
- if (!desc->name) {
- dev_err(hw->dev, "Failed to set gpio %d\n", gpio);
- return;
- }
+ if (!desc->name)
+ return -ENOTSUPP;
- mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DO, !!value);
+ return mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DO, !!value);
}
static int mtk_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio,
int value)
{
- mtk_gpio_set(chip, gpio, value);
+ int ret;
+
+ ret = mtk_gpio_set(chip, gpio, value);
+ if (ret)
+ return ret;
return pinctrl_gpio_direction_output(chip, gpio);
}
@@ -567,7 +569,7 @@ static int mtk_build_gpiochip(struct mtk_pinctrl *hw)
chip->direction_input = pinctrl_gpio_direction_input;
chip->direction_output = mtk_gpio_direction_output;
chip->get = mtk_gpio_get;
- chip->set = mtk_gpio_set;
+ chip->set_rv = mtk_gpio_set;
chip->to_irq = mtk_gpio_to_irq;
chip->set_config = mtk_gpio_set_config;
chip->base = -1;
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt6893.c b/drivers/pinctrl/mediatek/pinctrl-mt6893.c
new file mode 100644
index 000000000000..468ce0109b07
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mt6893.c
@@ -0,0 +1,879 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ * Copyright (C) 2024 Collabora Ltd.
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <linux/module.h>
+#include "pinctrl-mtk-mt6893.h"
+#include "pinctrl-paris.h"
+
+#define PIN_FIELD_BASE(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits) \
+ PIN_FIELD_CALC(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits, \
+ 32, 0)
+
+#define PINS_FIELD_BASE(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits) \
+ PIN_FIELD_CALC(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits, \
+ 32, 1)
+
+static const struct mtk_pin_field_calc mt6893_pin_mode_range[] = {
+ PIN_FIELD(0, 219, 0x0300, 0x10, 0, 4),
+};
+
+static const struct mtk_pin_field_calc mt6893_pin_dir_range[] = {
+ PIN_FIELD(0, 219, 0x0000, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt6893_pin_di_range[] = {
+ PIN_FIELD(0, 219, 0x0200, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt6893_pin_do_range[] = {
+ PIN_FIELD(0, 219, 0x0100, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt6893_pin_smt_range[] = {
+ PINS_FIELD_BASE(0, 9, 2, 0x00f0, 0x10, 7, 1),
+ PINS_FIELD_BASE(10, 15, 1, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(16, 17, 5, 0x00c0, 0x10, 8, 1),
+ PINS_FIELD_BASE(18, 25, 7, 0x00f0, 0x10, 1, 1),
+ PINS_FIELD_BASE(26, 30, 6, 0x00e0, 0x10, 6, 1),
+ PINS_FIELD_BASE(31, 35, 6, 0x00e0, 0x10, 2, 1),
+ PIN_FIELD_BASE(36, 36, 6, 0x00e0, 0x10, 16, 1),
+ PINS_FIELD_BASE(37, 39, 6, 0x00e0, 0x10, 15, 1),
+ PIN_FIELD_BASE(40, 41, 6, 0x00e0, 0x10, 0, 1),
+ PIN_FIELD_BASE(42, 42, 6, 0x00e0, 0x10, 5, 1),
+ PIN_FIELD_BASE(43, 44, 6, 0x00e0, 0x10, 3, 1),
+ PIN_FIELD_BASE(45, 45, 6, 0x00e0, 0x10, 12, 1),
+ PIN_FIELD_BASE(46, 46, 6, 0x00e0, 0x10, 14, 1),
+ PIN_FIELD_BASE(47, 47, 6, 0x00e0, 0x10, 13, 1),
+ PIN_FIELD_BASE(48, 49, 6, 0x00e0, 0x10, 10, 1),
+ PIN_FIELD_BASE(50, 50, 6, 0x00e0, 0x10, 9, 1),
+ PIN_FIELD_BASE(51, 52, 3, 0x0090, 0x10, 6, 1),
+ PINS_FIELD_BASE(53, 56, 3, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(57, 60, 3, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(61, 61, 3, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(62, 62, 4, 0x0050, 0x10, 1, 1),
+ PINS_FIELD_BASE(63, 73, 3, 0x0090, 0x10, 0, 1),
+ PINS_FIELD_BASE(74, 84, 4, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(85, 86, 4, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(87, 88, 4, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(89, 90, 2, 0x00f0, 0x10, 26, 1),
+ PIN_FIELD_BASE(91, 91, 2, 0x00f0, 0x10, 0, 1),
+ PINS_FIELD_BASE(92, 95, 2, 0x0100, 0x10, 0, 1),
+ PIN_FIELD_BASE(96, 96, 2, 0x00f0, 0x10, 30, 1),
+ PIN_FIELD_BASE(97, 97, 2, 0x00f0, 0x10, 28, 1),
+ PIN_FIELD_BASE(98, 98, 2, 0x00f0, 0x10, 31, 1),
+ PINS_FIELD_BASE(99, 102, 2, 0x00f0, 0x10, 29, 1),
+ PINS_FIELD_BASE(103, 105, 2, 0x00f0, 0x10, 24, 1),
+ PIN_FIELD_BASE(106, 106, 2, 0x00f0, 0x10, 25, 1),
+ PIN_FIELD_BASE(107, 108, 2, 0x00f0, 0x10, 5, 1),
+ PINS_FIELD_BASE(109, 113, 2, 0x00f0, 0x10, 8, 1),
+ PINS_FIELD_BASE(114, 116, 2, 0x00f0, 0x10, 16, 1),
+ PIN_FIELD_BASE(117, 117, 2, 0x00f0, 0x10, 17, 1),
+ PIN_FIELD_BASE(118, 118, 2, 0x00f0, 0x10, 10, 1),
+ PIN_FIELD_BASE(119, 119, 2, 0x00f0, 0x10, 18, 1),
+ PIN_FIELD_BASE(120, 120, 2, 0x00f0, 0x10, 15, 1),
+ PIN_FIELD_BASE(121, 121, 2, 0x00f0, 0x10, 23, 1),
+ PIN_FIELD_BASE(122, 122, 2, 0x00f0, 0x10, 14, 1),
+ PIN_FIELD_BASE(123, 123, 2, 0x00f0, 0x10, 22, 1),
+ PIN_FIELD_BASE(124, 124, 2, 0x00f0, 0x10, 13, 1),
+ PIN_FIELD_BASE(125, 125, 2, 0x00f0, 0x10, 21, 1),
+ PINS_FIELD_BASE(126, 129, 2, 0x00f0, 0x10, 9, 1),
+ PINS_FIELD_BASE(130, 135, 2, 0x00f0, 0x10, 4, 1),
+ PIN_FIELD_BASE(136, 138, 2, 0x00f0, 0x10, 1, 1),
+ PIN_FIELD_BASE(139, 139, 2, 0x00f0, 0x10, 12, 1),
+ PIN_FIELD_BASE(140, 140, 2, 0x00f0, 0x10, 20, 1),
+ PIN_FIELD_BASE(141, 141, 2, 0x00f0, 0x10, 11, 1),
+ PIN_FIELD_BASE(142, 142, 2, 0x00f0, 0x10, 19, 1),
+ PINS_FIELD_BASE(143, 148, 1, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(149, 151, 1, 0x0090, 0x10, 0, 1),
+ PINS_FIELD_BASE(152, 155, 5, 0x00c0, 0x10, 10, 1),
+ PIN_FIELD_BASE(156, 156, 5, 0x00c0, 0x10, 14, 1),
+ PINS_FIELD_BASE(157, 159, 5, 0x00c0, 0x10, 13, 1),
+ PIN_FIELD_BASE(160, 161, 5, 0x00c0, 0x10, 11, 1),
+ PINS_FIELD_BASE(162, 171, 5, 0x00c0, 0x10, 0, 1),
+ PIN_FIELD_BASE(172, 173, 5, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(174, 174, 5, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(175, 175, 5, 0x00c0, 0x10, 6, 1),
+ PIN_FIELD_BASE(176, 177, 5, 0x00c0, 0x10, 1, 1),
+ PINS_FIELD_BASE(178, 182, 5, 0x00c0, 0x10, 7, 1),
+ PIN_FIELD_BASE(183, 183, 7, 0x00f0, 0x10, 3, 1),
+ PINS_FIELD_BASE(184, 190, 7, 0x00f0, 0x10, 4, 1),
+ PIN_FIELD_BASE(191, 191, 7, 0x00f0, 0x10, 5, 1),
+ PIN_FIELD_BASE(192, 192, 7, 0x00f0, 0x10, 2, 1),
+ PIN_FIELD_BASE(193, 193, 7, 0x00f0, 0x10, 4, 1),
+ PIN_FIELD_BASE(194, 194, 7, 0x00f0, 0x10, 6, 1),
+ PIN_FIELD_BASE(195, 195, 7, 0x00f0, 0x10, 12, 1),
+ PINS_FIELD_BASE(196, 199, 7, 0x00f0, 0x10, 0, 1),
+ PIN_FIELD_BASE(200, 200, 7, 0x00f0, 0x10, 11, 1),
+ PIN_FIELD_BASE(201, 201, 7, 0x00f0, 0x10, 14, 1),
+ PIN_FIELD_BASE(202, 202, 7, 0x00f0, 0x10, 10, 1),
+ PIN_FIELD_BASE(203, 203, 7, 0x00f0, 0x10, 13, 1),
+ PIN_FIELD_BASE(204, 205, 6, 0x00e0, 0x10, 7, 1),
+ PIN_FIELD_BASE(206, 208, 7, 0x00f0, 0x10, 15, 1),
+ PINS_FIELD_BASE(209, 211, 7, 0x00f0, 0x10, 7, 1),
+ PIN_FIELD_BASE(212, 213, 7, 0x00f0, 0x10, 8, 1),
+ PINS_FIELD_BASE(214, 219, 7, 0x00f0, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt6893_pin_ies_range[] = {
+ PIN_FIELD_BASE(0, 9, 2, 0x0060, 0x10, 12, 1),
+ PIN_FIELD_BASE(10, 15, 1, 0x0020, 0x10, 9, 1),
+ PIN_FIELD_BASE(16, 17, 5, 0x0030, 0x10, 21, 1),
+ PIN_FIELD_BASE(18, 25, 7, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(26, 30, 6, 0x0040, 0x10, 10, 1),
+ PIN_FIELD_BASE(31, 31, 6, 0x0040, 0x10, 6, 1),
+ PIN_FIELD_BASE(32, 32, 6, 0x0040, 0x10, 3, 1),
+ PIN_FIELD_BASE(33, 33, 6, 0x0040, 0x10, 5, 1),
+ PIN_FIELD_BASE(34, 34, 6, 0x0040, 0x10, 2, 1),
+ PIN_FIELD_BASE(35, 35, 6, 0x0040, 0x10, 4, 1),
+ PIN_FIELD_BASE(36, 39, 6, 0x0040, 0x10, 23, 1),
+ PIN_FIELD_BASE(40, 41, 6, 0x0040, 0x10, 0, 1),
+ PIN_FIELD_BASE(42, 42, 6, 0x0040, 0x10, 9, 1),
+ PIN_FIELD_BASE(43, 44, 6, 0x0040, 0x10, 7, 1),
+ PIN_FIELD_BASE(45, 45, 6, 0x0040, 0x10, 20, 1),
+ PIN_FIELD_BASE(46, 46, 6, 0x0040, 0x10, 22, 1),
+ PIN_FIELD_BASE(47, 47, 6, 0x0040, 0x10, 21, 1),
+ PIN_FIELD_BASE(48, 49, 6, 0x0040, 0x10, 18, 1),
+ PIN_FIELD_BASE(50, 50, 6, 0x0040, 0x10, 17, 1),
+ PIN_FIELD_BASE(51, 52, 3, 0x0020, 0x10, 16, 1),
+ PIN_FIELD_BASE(53, 53, 3, 0x0020, 0x10, 21, 1),
+ PIN_FIELD_BASE(54, 54, 3, 0x0020, 0x10, 18, 1),
+ PIN_FIELD_BASE(55, 55, 3, 0x0020, 0x10, 20, 1),
+ PIN_FIELD_BASE(56, 56, 3, 0x0020, 0x10, 19, 1),
+ PIN_FIELD_BASE(57, 60, 3, 0x0020, 0x10, 12, 1),
+ PIN_FIELD_BASE(61, 61, 3, 0x0020, 0x10, 11, 1),
+ PIN_FIELD_BASE(62, 62, 4, 0x0010, 0x10, 11, 1),
+ PIN_FIELD_BASE(63, 64, 3, 0x0020, 0x10, 0, 1),
+ PIN_FIELD_BASE(65, 72, 3, 0x0020, 0x10, 3, 1),
+ PIN_FIELD_BASE(73, 73, 3, 0x0020, 0x10, 2, 1),
+ PIN_FIELD_BASE(74, 84, 4, 0x0010, 0x10, 0, 1),
+ PIN_FIELD_BASE(85, 86, 4, 0x0010, 0x10, 14, 1),
+ PIN_FIELD_BASE(87, 88, 4, 0x0010, 0x10, 12, 1),
+ PIN_FIELD_BASE(89, 90, 2, 0x0070, 0x10, 19, 1),
+ PIN_FIELD_BASE(91, 91, 2, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(92, 92, 2, 0x0070, 0x10, 28, 1),
+ PIN_FIELD_BASE(93, 93, 2, 0x0070, 0x10, 30, 1),
+ PIN_FIELD_BASE(94, 94, 2, 0x0070, 0x10, 29, 1),
+ PIN_FIELD_BASE(95, 95, 2, 0x0070, 0x10, 31, 1),
+ PIN_FIELD_BASE(96, 96, 2, 0x0070, 0x10, 26, 1),
+ PIN_FIELD_BASE(97, 97, 2, 0x0070, 0x10, 21, 1),
+ PIN_FIELD_BASE(98, 98, 2, 0x0070, 0x10, 27, 1),
+ PIN_FIELD_BASE(99, 102, 2, 0x0070, 0x10, 22, 1),
+ PIN_FIELD_BASE(103, 103, 2, 0x0070, 0x10, 17, 1),
+ PIN_FIELD_BASE(104, 104, 2, 0x0070, 0x10, 16, 1),
+ PIN_FIELD_BASE(105, 105, 2, 0x0070, 0x10, 18, 1),
+ PIN_FIELD_BASE(106, 106, 2, 0x0070, 0x10, 15, 1),
+ PIN_FIELD_BASE(107, 108, 2, 0x0060, 0x10, 10, 1),
+ PIN_FIELD_BASE(109, 109, 2, 0x0060, 0x10, 25, 1),
+ PIN_FIELD_BASE(110, 110, 2, 0x0060, 0x10, 22, 1),
+ PIN_FIELD_BASE(111, 111, 2, 0x0060, 0x10, 24, 1),
+ PIN_FIELD_BASE(112, 112, 2, 0x0060, 0x10, 26, 1),
+ PIN_FIELD_BASE(113, 113, 2, 0x0060, 0x10, 23, 1),
+ PIN_FIELD_BASE(114, 114, 2, 0x0070, 0x10, 7, 1),
+ PIN_FIELD_BASE(115, 115, 2, 0x0070, 0x10, 6, 1),
+ PIN_FIELD_BASE(116, 116, 2, 0x0070, 0x10, 8, 1),
+ PIN_FIELD_BASE(117, 117, 2, 0x0070, 0x10, 5, 1),
+ PIN_FIELD_BASE(118, 118, 2, 0x0060, 0x10, 31, 1),
+ PIN_FIELD_BASE(119, 119, 2, 0x0070, 0x10, 9, 1),
+ PIN_FIELD_BASE(120, 120, 2, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(121, 121, 2, 0x0070, 0x10, 14, 1),
+ PIN_FIELD_BASE(122, 122, 2, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(123, 123, 2, 0x0070, 0x10, 13, 1),
+ PIN_FIELD_BASE(124, 124, 2, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(125, 125, 2, 0x0070, 0x10, 12, 1),
+ PIN_FIELD_BASE(126, 129, 2, 0x0060, 0x10, 27, 1),
+ PIN_FIELD_BASE(130, 132, 2, 0x0060, 0x10, 7, 1),
+ PIN_FIELD_BASE(133, 135, 2, 0x0060, 0x10, 4, 1),
+ PIN_FIELD_BASE(136, 138, 2, 0x0060, 0x10, 1, 1),
+ PIN_FIELD_BASE(139, 139, 2, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(140, 140, 2, 0x0070, 0x10, 11, 1),
+ PIN_FIELD_BASE(141, 141, 2, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(142, 142, 2, 0x0070, 0x10, 10, 1),
+ PIN_FIELD_BASE(143, 145, 1, 0x0020, 0x10, 6, 1),
+ PIN_FIELD_BASE(146, 148, 1, 0x0020, 0x10, 3, 1),
+ PIN_FIELD_BASE(149, 151, 1, 0x0020, 0x10, 0, 1),
+ PIN_FIELD_BASE(152, 152, 5, 0x0030, 0x10, 26, 1),
+ PIN_FIELD_BASE(153, 153, 5, 0x0030, 0x10, 25, 1),
+ PIN_FIELD_BASE(154, 155, 5, 0x0030, 0x10, 23, 1),
+ PIN_FIELD_BASE(156, 158, 5, 0x0030, 0x10, 29, 1),
+ PIN_FIELD_BASE(159, 159, 5, 0x0040, 0x10, 0, 1),
+ PIN_FIELD_BASE(160, 161, 5, 0x0030, 0x10, 27, 1),
+ PIN_FIELD_BASE(162, 171, 5, 0x0030, 0x10, 0, 1),
+ PIN_FIELD_BASE(172, 173, 5, 0x0030, 0x10, 13, 1),
+ PIN_FIELD_BASE(174, 174, 5, 0x0030, 0x10, 12, 1),
+ PIN_FIELD_BASE(175, 175, 5, 0x0030, 0x10, 15, 1),
+ PIN_FIELD_BASE(176, 177, 5, 0x0030, 0x10, 10, 1),
+ PIN_FIELD_BASE(178, 182, 5, 0x0030, 0x10, 16, 1),
+ PIN_FIELD_BASE(183, 184, 7, 0x0050, 0x10, 19, 1),
+ PIN_FIELD_BASE(185, 185, 7, 0x0050, 0x10, 22, 1),
+ PIN_FIELD_BASE(186, 186, 7, 0x0050, 0x10, 24, 1),
+ PIN_FIELD_BASE(187, 187, 7, 0x0050, 0x10, 26, 1),
+ PIN_FIELD_BASE(188, 188, 7, 0x0050, 0x10, 21, 1),
+ PIN_FIELD_BASE(189, 189, 7, 0x0050, 0x10, 25, 1),
+ PIN_FIELD_BASE(190, 191, 7, 0x0050, 0x10, 27, 1),
+ PIN_FIELD_BASE(192, 192, 7, 0x0050, 0x10, 18, 1),
+ PIN_FIELD_BASE(193, 193, 7, 0x0050, 0x10, 23, 1),
+ PIN_FIELD_BASE(194, 194, 7, 0x0050, 0x10, 29, 1),
+ PIN_FIELD_BASE(195, 195, 7, 0x0060, 0x10, 5, 1),
+ PIN_FIELD_BASE(196, 196, 7, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(197, 197, 7, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(198, 198, 7, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(199, 199, 7, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(200, 200, 7, 0x0060, 0x10, 4, 1),
+ PIN_FIELD_BASE(201, 201, 7, 0x0060, 0x10, 7, 1),
+ PIN_FIELD_BASE(202, 202, 7, 0x0060, 0x10, 3, 1),
+ PIN_FIELD_BASE(203, 203, 7, 0x0060, 0x10, 6, 1),
+ PIN_FIELD_BASE(204, 205, 6, 0x0040, 0x10, 15, 1),
+ PIN_FIELD_BASE(206, 208, 7, 0x0060, 0x10, 8, 1),
+ PIN_FIELD_BASE(209, 209, 7, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(210, 210, 7, 0x0050, 0x10, 31, 1),
+ PIN_FIELD_BASE(211, 211, 7, 0x0060, 0x10, 1, 1),
+ PIN_FIELD_BASE(212, 212, 7, 0x0050, 0x10, 30, 1),
+ PIN_FIELD_BASE(213, 213, 7, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(214, 214, 7, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(215, 215, 7, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(216, 217, 7, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(218, 219, 7, 0x0050, 0x10, 1, 1),
+};
+
+static const struct mtk_pin_field_calc mt6893_pin_pu_range[] = {
+ PIN_FIELD_BASE(0, 9, 2, 0x00a0, 0x10, 12, 1),
+ PIN_FIELD_BASE(16, 17, 5, 0x0070, 0x10, 21, 1),
+ PIN_FIELD_BASE(18, 25, 7, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(26, 30, 6, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(31, 31, 6, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(32, 32, 6, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(33, 33, 6, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(34, 34, 6, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(35, 35, 6, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(36, 39, 6, 0x0080, 0x10, 17, 1),
+ PIN_FIELD_BASE(40, 41, 6, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(42, 42, 6, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(43, 44, 6, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(57, 60, 3, 0x0050, 0x10, 12, 1),
+ PIN_FIELD_BASE(61, 61, 3, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(62, 62, 4, 0x0030, 0x10, 11, 1),
+ PIN_FIELD_BASE(63, 64, 3, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(65, 72, 3, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(73, 73, 3, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(74, 84, 4, 0x0030, 0x10, 0, 1),
+ PIN_FIELD_BASE(85, 86, 4, 0x0030, 0x10, 14, 1),
+ PIN_FIELD_BASE(87, 88, 4, 0x0030, 0x10, 12, 1),
+ PIN_FIELD_BASE(89, 90, 2, 0x00b0, 0x10, 19, 1),
+ PIN_FIELD_BASE(91, 91, 2, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(92, 92, 2, 0x00b0, 0x10, 28, 1),
+ PIN_FIELD_BASE(93, 93, 2, 0x00b0, 0x10, 30, 1),
+ PIN_FIELD_BASE(94, 94, 2, 0x00b0, 0x10, 29, 1),
+ PIN_FIELD_BASE(95, 95, 2, 0x00b0, 0x10, 31, 1),
+ PIN_FIELD_BASE(96, 96, 2, 0x00b0, 0x10, 26, 1),
+ PIN_FIELD_BASE(97, 97, 2, 0x00b0, 0x10, 21, 1),
+ PIN_FIELD_BASE(98, 98, 2, 0x00b0, 0x10, 27, 1),
+ PIN_FIELD_BASE(99, 102, 2, 0x00b0, 0x10, 22, 1),
+ PIN_FIELD_BASE(103, 103, 2, 0x00b0, 0x10, 17, 1),
+ PIN_FIELD_BASE(104, 104, 2, 0x00b0, 0x10, 16, 1),
+ PIN_FIELD_BASE(105, 105, 2, 0x00b0, 0x10, 18, 1),
+ PIN_FIELD_BASE(106, 106, 2, 0x00b0, 0x10, 15, 1),
+ PIN_FIELD_BASE(107, 108, 2, 0x00a0, 0x10, 10, 1),
+ PIN_FIELD_BASE(109, 109, 2, 0x00a0, 0x10, 25, 1),
+ PIN_FIELD_BASE(110, 110, 2, 0x00a0, 0x10, 22, 1),
+ PIN_FIELD_BASE(111, 111, 2, 0x00a0, 0x10, 24, 1),
+ PIN_FIELD_BASE(112, 112, 2, 0x00a0, 0x10, 26, 1),
+ PIN_FIELD_BASE(113, 113, 2, 0x00a0, 0x10, 23, 1),
+ PIN_FIELD_BASE(114, 114, 2, 0x00b0, 0x10, 7, 1),
+ PIN_FIELD_BASE(115, 115, 2, 0x00b0, 0x10, 6, 1),
+ PIN_FIELD_BASE(116, 116, 2, 0x00b0, 0x10, 8, 1),
+ PIN_FIELD_BASE(117, 117, 2, 0x00b0, 0x10, 5, 1),
+ PIN_FIELD_BASE(118, 118, 2, 0x00a0, 0x10, 31, 1),
+ PIN_FIELD_BASE(119, 119, 2, 0x00b0, 0x10, 9, 1),
+ PIN_FIELD_BASE(120, 120, 2, 0x00b0, 0x10, 4, 1),
+ PIN_FIELD_BASE(121, 121, 2, 0x00b0, 0x10, 14, 1),
+ PIN_FIELD_BASE(122, 122, 2, 0x00b0, 0x10, 3, 1),
+ PIN_FIELD_BASE(123, 123, 2, 0x00b0, 0x10, 13, 1),
+ PIN_FIELD_BASE(124, 124, 2, 0x00b0, 0x10, 2, 1),
+ PIN_FIELD_BASE(125, 125, 2, 0x00b0, 0x10, 12, 1),
+ PIN_FIELD_BASE(126, 129, 2, 0x00a0, 0x10, 27, 1),
+ PIN_FIELD_BASE(130, 132, 2, 0x00a0, 0x10, 7, 1),
+ PIN_FIELD_BASE(133, 135, 2, 0x00a0, 0x10, 4, 1),
+ PIN_FIELD_BASE(136, 138, 2, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(139, 139, 2, 0x00b0, 0x10, 1, 1),
+ PIN_FIELD_BASE(140, 140, 2, 0x00b0, 0x10, 11, 1),
+ PIN_FIELD_BASE(141, 141, 2, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(142, 142, 2, 0x00b0, 0x10, 10, 1),
+ PIN_FIELD_BASE(143, 145, 1, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(146, 148, 1, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(149, 151, 1, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(156, 159, 5, 0x0070, 0x10, 25, 1),
+ PIN_FIELD_BASE(160, 161, 5, 0x0070, 0x10, 23, 1),
+ PIN_FIELD_BASE(162, 171, 5, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(172, 173, 5, 0x0070, 0x10, 13, 1),
+ PIN_FIELD_BASE(174, 174, 5, 0x0070, 0x10, 12, 1),
+ PIN_FIELD_BASE(175, 175, 5, 0x0070, 0x10, 15, 1),
+ PIN_FIELD_BASE(176, 177, 5, 0x0070, 0x10, 10, 1),
+ PIN_FIELD_BASE(178, 182, 5, 0x0070, 0x10, 16, 1),
+ PIN_FIELD_BASE(195, 195, 7, 0x0090, 0x10, 25, 1),
+ PIN_FIELD_BASE(196, 196, 7, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(197, 197, 7, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(198, 198, 7, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(199, 199, 7, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(200, 200, 7, 0x0090, 0x10, 24, 1),
+ PIN_FIELD_BASE(201, 201, 7, 0x0090, 0x10, 27, 1),
+ PIN_FIELD_BASE(202, 202, 7, 0x0090, 0x10, 23, 1),
+ PIN_FIELD_BASE(203, 203, 7, 0x0090, 0x10, 26, 1),
+ PIN_FIELD_BASE(204, 205, 6, 0x0080, 0x10, 15, 1),
+ PIN_FIELD_BASE(206, 208, 7, 0x0090, 0x10, 28, 1),
+ PIN_FIELD_BASE(209, 209, 7, 0x0090, 0x10, 20, 1),
+ PIN_FIELD_BASE(210, 210, 7, 0x0090, 0x10, 19, 1),
+ PIN_FIELD_BASE(211, 211, 7, 0x0090, 0x10, 21, 1),
+ PIN_FIELD_BASE(212, 212, 7, 0x0090, 0x10, 18, 1),
+ PIN_FIELD_BASE(213, 213, 7, 0x0090, 0x10, 22, 1),
+ PIN_FIELD_BASE(214, 214, 7, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(215, 215, 7, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(216, 217, 7, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(218, 219, 7, 0x0090, 0x10, 1, 1),
+};
+
+static const struct mtk_pin_field_calc mt6893_pin_pd_range[] = {
+ PIN_FIELD_BASE(0, 9, 2, 0x0080, 0x10, 12, 1),
+ PIN_FIELD_BASE(16, 17, 5, 0x0050, 0x10, 21, 1),
+ PIN_FIELD_BASE(18, 25, 7, 0x0070, 0x10, 10, 1),
+ PIN_FIELD_BASE(26, 30, 6, 0x0060, 0x10, 10, 1),
+ PIN_FIELD_BASE(31, 31, 6, 0x0060, 0x10, 6, 1),
+ PIN_FIELD_BASE(32, 32, 6, 0x0060, 0x10, 3, 1),
+ PIN_FIELD_BASE(33, 33, 6, 0x0060, 0x10, 5, 1),
+ PIN_FIELD_BASE(34, 34, 6, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(35, 35, 6, 0x0060, 0x10, 4, 1),
+ PIN_FIELD_BASE(36, 39, 6, 0x0060, 0x10, 17, 1),
+ PIN_FIELD_BASE(40, 41, 6, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(42, 42, 6, 0x0060, 0x10, 9, 1),
+ PIN_FIELD_BASE(43, 44, 6, 0x0060, 0x10, 7, 1),
+ PIN_FIELD_BASE(57, 60, 3, 0x0030, 0x10, 12, 1),
+ PIN_FIELD_BASE(61, 61, 3, 0x0030, 0x10, 11, 1),
+ PIN_FIELD_BASE(62, 62, 4, 0x0020, 0x10, 11, 1),
+ PIN_FIELD_BASE(63, 64, 3, 0x0030, 0x10, 0, 1),
+ PIN_FIELD_BASE(65, 72, 3, 0x0030, 0x10, 3, 1),
+ PIN_FIELD_BASE(73, 73, 3, 0x0030, 0x10, 2, 1),
+ PIN_FIELD_BASE(74, 84, 4, 0x0020, 0x10, 0, 1),
+ PIN_FIELD_BASE(85, 86, 4, 0x0020, 0x10, 14, 1),
+ PIN_FIELD_BASE(87, 88, 4, 0x0020, 0x10, 12, 1),
+ PIN_FIELD_BASE(89, 90, 2, 0x0090, 0x10, 19, 1),
+ PIN_FIELD_BASE(91, 91, 2, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(92, 92, 2, 0x0090, 0x10, 28, 1),
+ PIN_FIELD_BASE(93, 93, 2, 0x0090, 0x10, 30, 1),
+ PIN_FIELD_BASE(94, 94, 2, 0x0090, 0x10, 29, 1),
+ PIN_FIELD_BASE(95, 95, 2, 0x0090, 0x10, 31, 1),
+ PIN_FIELD_BASE(96, 96, 2, 0x0090, 0x10, 26, 1),
+ PIN_FIELD_BASE(97, 97, 2, 0x0090, 0x10, 21, 1),
+ PIN_FIELD_BASE(98, 98, 2, 0x0090, 0x10, 27, 1),
+ PIN_FIELD_BASE(99, 102, 2, 0x0090, 0x10, 22, 1),
+ PIN_FIELD_BASE(103, 103, 2, 0x0090, 0x10, 17, 1),
+ PIN_FIELD_BASE(104, 104, 2, 0x0090, 0x10, 16, 1),
+ PIN_FIELD_BASE(105, 105, 2, 0x0090, 0x10, 18, 1),
+ PIN_FIELD_BASE(106, 106, 2, 0x0090, 0x10, 15, 1),
+ PIN_FIELD_BASE(107, 108, 2, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(109, 109, 2, 0x0080, 0x10, 25, 1),
+ PIN_FIELD_BASE(110, 110, 2, 0x0080, 0x10, 22, 1),
+ PIN_FIELD_BASE(111, 111, 2, 0x0080, 0x10, 24, 1),
+ PIN_FIELD_BASE(112, 112, 2, 0x0080, 0x10, 26, 1),
+ PIN_FIELD_BASE(113, 113, 2, 0x0080, 0x10, 23, 1),
+ PIN_FIELD_BASE(114, 114, 2, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(115, 115, 2, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(116, 116, 2, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(117, 117, 2, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(118, 118, 2, 0x0080, 0x10, 31, 1),
+ PIN_FIELD_BASE(119, 119, 2, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(120, 120, 2, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(121, 121, 2, 0x0090, 0x10, 14, 1),
+ PIN_FIELD_BASE(122, 122, 2, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(123, 123, 2, 0x0090, 0x10, 13, 1),
+ PIN_FIELD_BASE(124, 124, 2, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(125, 125, 2, 0x0090, 0x10, 12, 1),
+ PIN_FIELD_BASE(126, 129, 2, 0x0080, 0x10, 27, 1),
+ PIN_FIELD_BASE(130, 132, 2, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(133, 135, 2, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(136, 138, 2, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(139, 139, 2, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(140, 140, 2, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(141, 141, 2, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(142, 142, 2, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(143, 145, 1, 0x0030, 0x10, 6, 1),
+ PIN_FIELD_BASE(146, 148, 1, 0x0030, 0x10, 3, 1),
+ PIN_FIELD_BASE(149, 151, 1, 0x0030, 0x10, 0, 1),
+ PIN_FIELD_BASE(156, 159, 5, 0x0050, 0x10, 25, 1),
+ PIN_FIELD_BASE(160, 161, 5, 0x0050, 0x10, 23, 1),
+ PIN_FIELD_BASE(162, 171, 5, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(172, 173, 5, 0x0050, 0x10, 13, 1),
+ PIN_FIELD_BASE(174, 174, 5, 0x0050, 0x10, 12, 1),
+ PIN_FIELD_BASE(175, 175, 5, 0x0050, 0x10, 15, 1),
+ PIN_FIELD_BASE(176, 177, 5, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(178, 182, 5, 0x0050, 0x10, 16, 1),
+ PIN_FIELD_BASE(195, 195, 7, 0x0070, 0x10, 25, 1),
+ PIN_FIELD_BASE(196, 196, 7, 0x0070, 0x10, 6, 1),
+ PIN_FIELD_BASE(197, 197, 7, 0x0070, 0x10, 8, 1),
+ PIN_FIELD_BASE(198, 198, 7, 0x0070, 0x10, 7, 1),
+ PIN_FIELD_BASE(199, 199, 7, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(200, 200, 7, 0x0070, 0x10, 24, 1),
+ PIN_FIELD_BASE(201, 201, 7, 0x0070, 0x10, 27, 1),
+ PIN_FIELD_BASE(202, 202, 7, 0x0070, 0x10, 23, 1),
+ PIN_FIELD_BASE(203, 203, 7, 0x0070, 0x10, 26, 1),
+ PIN_FIELD_BASE(204, 205, 6, 0x0060, 0x10, 15, 1),
+ PIN_FIELD_BASE(206, 208, 7, 0x0070, 0x10, 28, 1),
+ PIN_FIELD_BASE(209, 209, 7, 0x0070, 0x10, 20, 1),
+ PIN_FIELD_BASE(210, 210, 7, 0x0070, 0x10, 19, 1),
+ PIN_FIELD_BASE(211, 211, 7, 0x0070, 0x10, 21, 1),
+ PIN_FIELD_BASE(212, 212, 7, 0x0070, 0x10, 18, 1),
+ PIN_FIELD_BASE(213, 213, 7, 0x0070, 0x10, 22, 1),
+ PIN_FIELD_BASE(214, 214, 7, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(215, 215, 7, 0x0070, 0x10, 9, 1),
+ PIN_FIELD_BASE(216, 217, 7, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(218, 219, 7, 0x0070, 0x10, 1, 1),
+};
+
+static const struct mtk_pin_field_calc mt6893_pin_drv_range[] = {
+ PINS_FIELD_BASE(0, 9, 2, 0x0000, 0x10, 21, 3),
+ PINS_FIELD_BASE(10, 15, 1, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(16, 17, 5, 0x0000, 0x10, 18, 3),
+ PINS_FIELD_BASE(18, 25, 7, 0x0000, 0x10, 3, 3),
+ PINS_FIELD_BASE(26, 30, 6, 0x0000, 0x10, 15, 3),
+ PINS_FIELD_BASE(31, 35, 6, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(36, 36, 6, 0x0010, 0x10, 7, 3),
+ PINS_FIELD_BASE(37, 39, 6, 0x0010, 0x10, 4, 3),
+ PIN_FIELD_BASE(40, 41, 6, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(42, 42, 6, 0x0000, 0x10, 12, 3),
+ PINS_FIELD_BASE(43, 44, 6, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(45, 45, 6, 0x0000, 0x10, 30, 2),
+ PIN_FIELD_BASE(46, 46, 6, 0x0010, 0x10, 2, 2),
+ PIN_FIELD_BASE(47, 47, 6, 0x0010, 0x10, 0, 2),
+ PIN_FIELD_BASE(48, 49, 6, 0x0000, 0x10, 26, 2),
+ PIN_FIELD_BASE(50, 50, 6, 0x0000, 0x10, 24, 2),
+ PIN_FIELD_BASE(51, 52, 3, 0x0000, 0x10, 18, 3),
+ PINS_FIELD_BASE(53, 56, 3, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(57, 60, 3, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(61, 61, 3, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(62, 62, 4, 0x0000, 0x10, 3, 3),
+ PINS_FIELD_BASE(63, 73, 3, 0x0000, 0x10, 0, 3),
+ PINS_FIELD_BASE(74, 84, 4, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(85, 86, 4, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(87, 88, 4, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(89, 90, 2, 0x0020, 0x10, 15, 3),
+ PIN_FIELD_BASE(91, 91, 2, 0x0000, 0x10, 0, 3),
+ PINS_FIELD_BASE(92, 95, 2, 0x0030, 0x10, 3, 3),
+ PIN_FIELD_BASE(96, 96, 2, 0x0020, 0x10, 27, 3),
+ PIN_FIELD_BASE(97, 97, 2, 0x0020, 0x10, 21, 3),
+ PIN_FIELD_BASE(98, 98, 2, 0x0030, 0x10, 0, 3),
+ PINS_FIELD_BASE(99, 102, 2, 0x0020, 0x10, 24, 3),
+ PINS_FIELD_BASE(103, 105, 2, 0x0020, 0x10, 9, 3),
+ PIN_FIELD_BASE(106, 106, 2, 0x0020, 0x10, 12, 3),
+ PIN_FIELD_BASE(107, 108, 2, 0x0000, 0x10, 15, 3),
+ PINS_FIELD_BASE(109, 113, 2, 0x0000, 0x10, 24, 3),
+ PINS_FIELD_BASE(114, 117, 2, 0x0010, 0x10, 18, 3),
+ PIN_FIELD_BASE(118, 118, 2, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(119, 119, 2, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(120, 120, 2, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(121, 121, 2, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(122, 122, 2, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(123, 123, 2, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(124, 124, 2, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(125, 125, 2, 0x0020, 0x10, 0, 3),
+ PINS_FIELD_BASE(126, 129, 2, 0x0000, 0x10, 27, 3),
+ PINS_FIELD_BASE(130, 135, 2, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(136, 138, 2, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(139, 139, 2, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(140, 140, 2, 0x0010, 0x10, 27, 3),
+ PIN_FIELD_BASE(141, 141, 2, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(142, 142, 2, 0x0010, 0x10, 24, 3),
+ PINS_FIELD_BASE(143, 148, 1, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(149, 151, 1, 0x0000, 0x10, 0, 3),
+ PINS_FIELD_BASE(152, 155, 5, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(156, 156, 5, 0x0010, 0x10, 6, 3),
+ PINS_FIELD_BASE(157, 159, 5, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(160, 160, 5, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(161, 161, 5, 0x0010, 0x10, 0, 3),
+ PINS_FIELD_BASE(162, 171, 5, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(172, 172, 5, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(173, 173, 5, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(174, 174, 5, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(175, 177, 5, 0x0000, 0x10, 3, 3),
+ PINS_FIELD_BASE(178, 182, 5, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(183, 183, 7, 0x0000, 0x10, 9, 3),
+ PINS_FIELD_BASE(184, 190, 7, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(191, 191, 7, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(192, 192, 7, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(193, 193, 7, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(194, 194, 7, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(195, 195, 7, 0x0010, 0x10, 3, 3),
+ PINS_FIELD_BASE(196, 199, 7, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(200, 200, 7, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(201, 201, 7, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(202, 202, 7, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(203, 203, 7, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(204, 205, 6, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(206, 208, 7, 0x0010, 0x10, 12, 3),
+ PINS_FIELD_BASE(209, 212, 7, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(213, 213, 7, 0x0000, 0x10, 24, 3),
+ PINS_FIELD_BASE(214, 219, 7, 0x0000, 0x10, 0, 3),
+};
+
+static const struct mtk_pin_field_calc mt6893_pin_pupd_range[] = {
+ PIN_FIELD_BASE(10, 15, 1, 0x0040, 0x10, 0, 1),
+ PIN_FIELD_BASE(45, 45, 6, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(46, 46, 6, 0x0070, 0x10, 5, 1),
+ PIN_FIELD_BASE(47, 47, 6, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(48, 49, 6, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(50, 50, 6, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(51, 52, 3, 0x0040, 0x10, 0, 1),
+ PIN_FIELD_BASE(53, 53, 3, 0x0040, 0x10, 5, 1),
+ PIN_FIELD_BASE(54, 54, 3, 0x0040, 0x10, 2, 1),
+ PIN_FIELD_BASE(55, 55, 3, 0x0040, 0x10, 4, 1),
+ PIN_FIELD_BASE(56, 56, 3, 0x0040, 0x10, 3, 1),
+ PIN_FIELD_BASE(152, 152, 5, 0x0060, 0x10, 3, 1),
+ PIN_FIELD_BASE(153, 153, 5, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(154, 155, 5, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(183, 184, 7, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(185, 185, 7, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(186, 186, 7, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(187, 187, 7, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(188, 188, 7, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(189, 189, 7, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(190, 191, 7, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(192, 192, 7, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(193, 193, 7, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(194, 194, 7, 0x0080, 0x10, 11, 1),
+};
+
+static const struct mtk_pin_field_calc mt6893_pin_r0_range[] = {
+ PIN_FIELD_BASE(10, 15, 1, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(24, 24, 7, 0x00e0, 0x10, 0, 1),
+ PIN_FIELD_BASE(25, 25, 7, 0x00e0, 0x10, 2, 1),
+ PIN_FIELD_BASE(45, 45, 6, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(46, 46, 6, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(47, 47, 6, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(48, 49, 6, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(50, 50, 6, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(51, 52, 3, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(53, 53, 3, 0x0060, 0x10, 5, 1),
+ PIN_FIELD_BASE(54, 54, 3, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(55, 55, 3, 0x0060, 0x10, 4, 1),
+ PIN_FIELD_BASE(56, 56, 3, 0x0060, 0x10, 3, 1),
+ PIN_FIELD_BASE(118, 118, 2, 0x00e0, 0x10, 0, 1),
+ PIN_FIELD_BASE(119, 119, 2, 0x00e0, 0x10, 12, 1),
+ PIN_FIELD_BASE(120, 120, 2, 0x00e0, 0x10, 10, 1),
+ PIN_FIELD_BASE(121, 121, 2, 0x00e0, 0x10, 22, 1),
+ PIN_FIELD_BASE(122, 122, 2, 0x00e0, 0x10, 8, 1),
+ PIN_FIELD_BASE(123, 123, 2, 0x00e0, 0x10, 20, 1),
+ PIN_FIELD_BASE(124, 124, 2, 0x00e0, 0x10, 6, 1),
+ PIN_FIELD_BASE(125, 125, 2, 0x00e0, 0x10, 18, 1),
+ PIN_FIELD_BASE(139, 139, 2, 0x00e0, 0x10, 4, 1),
+ PIN_FIELD_BASE(140, 140, 2, 0x00e0, 0x10, 16, 1),
+ PIN_FIELD_BASE(141, 141, 2, 0x00e0, 0x10, 2, 1),
+ PIN_FIELD_BASE(142, 142, 2, 0x00e0, 0x10, 14, 1),
+ PIN_FIELD_BASE(152, 152, 5, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(153, 153, 5, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(154, 155, 5, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(160, 160, 5, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(161, 161, 5, 0x00b0, 0x10, 2, 1),
+ PIN_FIELD_BASE(183, 184, 7, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(185, 185, 7, 0x00a0, 0x10, 4, 1),
+ PIN_FIELD_BASE(186, 186, 7, 0x00a0, 0x10, 6, 1),
+ PIN_FIELD_BASE(187, 187, 7, 0x00a0, 0x10, 8, 1),
+ PIN_FIELD_BASE(188, 188, 7, 0x00a0, 0x10, 3, 1),
+ PIN_FIELD_BASE(189, 189, 7, 0x00a0, 0x10, 7, 1),
+ PIN_FIELD_BASE(190, 191, 7, 0x00a0, 0x10, 9, 1),
+ PIN_FIELD_BASE(192, 192, 7, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(193, 193, 7, 0x00a0, 0x10, 5, 1),
+ PIN_FIELD_BASE(194, 194, 7, 0x00a0, 0x10, 11, 1),
+ PIN_FIELD_BASE(200, 200, 7, 0x00e0, 0x10, 6, 1),
+ PIN_FIELD_BASE(201, 201, 7, 0x00e0, 0x10, 10, 1),
+ PIN_FIELD_BASE(202, 202, 7, 0x00e0, 0x10, 4, 1),
+ PIN_FIELD_BASE(203, 203, 7, 0x00e0, 0x10, 8, 1),
+ PIN_FIELD_BASE(204, 204, 6, 0x00d0, 0x10, 0, 1),
+ PIN_FIELD_BASE(205, 205, 6, 0x00d0, 0x10, 2, 1),
+};
+
+static const struct mtk_pin_field_calc mt6893_pin_r1_range[] = {
+ PIN_FIELD_BASE(10, 15, 1, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(24, 24, 7, 0x00e0, 0x10, 1, 1),
+ PIN_FIELD_BASE(25, 25, 7, 0x00e0, 0x10, 3, 1),
+ PIN_FIELD_BASE(45, 45, 6, 0x00a0, 0x10, 3, 1),
+ PIN_FIELD_BASE(46, 46, 6, 0x00a0, 0x10, 5, 1),
+ PIN_FIELD_BASE(47, 47, 6, 0x00a0, 0x10, 4, 1),
+ PIN_FIELD_BASE(48, 49, 6, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(50, 50, 6, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(51, 52, 3, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(53, 53, 3, 0x0070, 0x10, 5, 1),
+ PIN_FIELD_BASE(54, 54, 3, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(55, 55, 3, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(56, 56, 3, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(118, 118, 2, 0x00e0, 0x10, 1, 1),
+ PIN_FIELD_BASE(119, 119, 2, 0x00e0, 0x10, 13, 1),
+ PIN_FIELD_BASE(120, 120, 2, 0x00e0, 0x10, 11, 1),
+ PIN_FIELD_BASE(121, 121, 2, 0x00e0, 0x10, 23, 1),
+ PIN_FIELD_BASE(122, 122, 2, 0x00e0, 0x10, 9, 1),
+ PIN_FIELD_BASE(123, 123, 2, 0x00e0, 0x10, 21, 1),
+ PIN_FIELD_BASE(124, 124, 2, 0x00e0, 0x10, 7, 1),
+ PIN_FIELD_BASE(125, 125, 2, 0x00e0, 0x10, 19, 1),
+ PIN_FIELD_BASE(139, 139, 2, 0x00e0, 0x10, 5, 1),
+ PIN_FIELD_BASE(140, 140, 2, 0x00e0, 0x10, 17, 1),
+ PIN_FIELD_BASE(141, 141, 2, 0x00e0, 0x10, 3, 1),
+ PIN_FIELD_BASE(142, 142, 2, 0x00e0, 0x10, 15, 1),
+ PIN_FIELD_BASE(152, 152, 5, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(153, 153, 5, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(154, 155, 5, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(160, 160, 5, 0x00b0, 0x10, 1, 1),
+ PIN_FIELD_BASE(161, 161, 5, 0x00b0, 0x10, 3, 1),
+ PIN_FIELD_BASE(183, 184, 7, 0x00b0, 0x10, 1, 1),
+ PIN_FIELD_BASE(185, 185, 7, 0x00b0, 0x10, 4, 1),
+ PIN_FIELD_BASE(186, 186, 7, 0x00b0, 0x10, 6, 1),
+ PIN_FIELD_BASE(187, 187, 7, 0x00b0, 0x10, 8, 1),
+ PIN_FIELD_BASE(188, 188, 7, 0x00b0, 0x10, 3, 1),
+ PIN_FIELD_BASE(189, 189, 7, 0x00b0, 0x10, 7, 1),
+ PIN_FIELD_BASE(190, 191, 7, 0x00b0, 0x10, 9, 1),
+ PIN_FIELD_BASE(192, 192, 7, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(193, 193, 7, 0x00b0, 0x10, 5, 1),
+ PIN_FIELD_BASE(194, 194, 7, 0x00b0, 0x10, 11, 1),
+ PIN_FIELD_BASE(200, 200, 7, 0x00e0, 0x10, 7, 1),
+ PIN_FIELD_BASE(201, 201, 7, 0x00e0, 0x10, 11, 1),
+ PIN_FIELD_BASE(202, 202, 7, 0x00e0, 0x10, 5, 1),
+ PIN_FIELD_BASE(203, 203, 7, 0x00e0, 0x10, 9, 1),
+ PIN_FIELD_BASE(204, 204, 6, 0x00d0, 0x10, 1, 1),
+ PIN_FIELD_BASE(205, 205, 6, 0x00d0, 0x10, 3, 1),
+};
+
+static const struct mtk_pin_field_calc mt6893_pin_drv_adv_range[] = {
+ PIN_FIELD_BASE(24, 24, 7, 0x0030, 0x10, 0, 3),
+ PIN_FIELD_BASE(25, 25, 7, 0x0030, 0x10, 3, 3),
+ PIN_FIELD_BASE(89, 89, 2, 0x0050, 0x10, 6, 5),
+ PIN_FIELD_BASE(90, 90, 2, 0x0050, 0x10, 11, 5),
+ PIN_FIELD_BASE(118, 118, 2, 0x0040, 0x10, 0, 3),
+ PIN_FIELD_BASE(119, 119, 2, 0x0040, 0x10, 18, 3),
+ PIN_FIELD_BASE(120, 120, 2, 0x0040, 0x10, 15, 3),
+ PIN_FIELD_BASE(121, 121, 2, 0x0050, 0x10, 3, 3),
+ PIN_FIELD_BASE(122, 122, 2, 0x0040, 0x10, 12, 3),
+ PIN_FIELD_BASE(123, 123, 2, 0x0050, 0x10, 0, 3),
+ PIN_FIELD_BASE(124, 124, 2, 0x0040, 0x10, 9, 3),
+ PIN_FIELD_BASE(125, 125, 2, 0x0040, 0x10, 27, 3),
+ PIN_FIELD_BASE(139, 139, 2, 0x0040, 0x10, 6, 3),
+ PIN_FIELD_BASE(140, 140, 2, 0x0040, 0x10, 24, 3),
+ PIN_FIELD_BASE(141, 141, 2, 0x0040, 0x10, 3, 3),
+ PIN_FIELD_BASE(142, 142, 2, 0x0040, 0x10, 21, 3),
+ PIN_FIELD_BASE(160, 160, 5, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(161, 161, 5, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(200, 200, 7, 0x0030, 0x10, 9, 3),
+ PIN_FIELD_BASE(201, 201, 7, 0x0030, 0x10, 15, 3),
+ PIN_FIELD_BASE(202, 202, 7, 0x0030, 0x10, 6, 3),
+ PIN_FIELD_BASE(203, 203, 7, 0x0030, 0x10, 12, 3),
+ PIN_FIELD_BASE(204, 204, 6, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(205, 205, 6, 0x0020, 0x10, 3, 3),
+};
+
+static const struct mtk_pin_field_calc mt6893_pin_rsel_range[] = {
+ PIN_FIELD_BASE(24, 24, 7, 0x00e0, 0x10, 0, 2),
+ PIN_FIELD_BASE(25, 25, 7, 0x00e0, 0x10, 2, 2),
+ PIN_FIELD_BASE(118, 118, 2, 0x00e0, 0x10, 0, 2),
+ PIN_FIELD_BASE(119, 119, 2, 0x00e0, 0x10, 12, 2),
+ PIN_FIELD_BASE(120, 120, 2, 0x00e0, 0x10, 10, 2),
+ PIN_FIELD_BASE(121, 121, 2, 0x00e0, 0x10, 22, 2),
+ PIN_FIELD_BASE(122, 122, 2, 0x00e0, 0x10, 8, 2),
+ PIN_FIELD_BASE(123, 123, 2, 0x00e0, 0x10, 20, 2),
+ PIN_FIELD_BASE(124, 124, 2, 0x00e0, 0x10, 6, 2),
+ PIN_FIELD_BASE(125, 125, 2, 0x00e0, 0x10, 18, 2),
+ PIN_FIELD_BASE(139, 139, 2, 0x00e0, 0x10, 4, 2),
+ PIN_FIELD_BASE(140, 140, 2, 0x00e0, 0x10, 16, 2),
+ PIN_FIELD_BASE(141, 141, 2, 0x00e0, 0x10, 2, 2),
+ PIN_FIELD_BASE(142, 142, 2, 0x00e0, 0x10, 14, 2),
+ PIN_FIELD_BASE(160, 160, 5, 0x00b0, 0x10, 0, 2),
+ PIN_FIELD_BASE(161, 161, 5, 0x00b0, 0x10, 2, 2),
+ PIN_FIELD_BASE(200, 200, 7, 0x00e0, 0x10, 6, 2),
+ PIN_FIELD_BASE(201, 201, 7, 0x00e0, 0x10, 10, 2),
+ PIN_FIELD_BASE(202, 202, 7, 0x00e0, 0x10, 4, 2),
+ PIN_FIELD_BASE(203, 203, 7, 0x00e0, 0x10, 8, 2),
+ PIN_FIELD_BASE(204, 204, 6, 0x00d0, 0x10, 0, 2),
+ PIN_FIELD_BASE(205, 205, 6, 0x00d0, 0x10, 2, 2),
+};
+
+static const unsigned int mt6893_pull_type[] = {
+ MTK_PULL_PU_PD_TYPE, /* 0 */ MTK_PULL_PU_PD_TYPE, /* 1 */
+ MTK_PULL_PU_PD_TYPE, /* 2 */ MTK_PULL_PU_PD_TYPE, /* 3 */
+ MTK_PULL_PU_PD_TYPE, /* 4 */ MTK_PULL_PU_PD_TYPE, /* 5 */
+ MTK_PULL_PU_PD_TYPE, /* 6 */ MTK_PULL_PU_PD_TYPE, /* 7 */
+ MTK_PULL_PU_PD_TYPE, /* 8 */ MTK_PULL_PU_PD_TYPE, /* 9 */
+ MTK_PULL_PUPD_R1R0_TYPE, /* 10 */ MTK_PULL_PUPD_R1R0_TYPE, /* 11 */
+ MTK_PULL_PUPD_R1R0_TYPE, /* 12 */ MTK_PULL_PUPD_R1R0_TYPE, /* 13 */
+ MTK_PULL_PUPD_R1R0_TYPE, /* 14 */ MTK_PULL_PUPD_R1R0_TYPE, /* 15 */
+ MTK_PULL_PU_PD_TYPE, /* 16 */ MTK_PULL_PU_PD_TYPE, /* 17 */
+ MTK_PULL_PU_PD_TYPE, /* 18 */ MTK_PULL_PU_PD_TYPE, /* 19 */
+ MTK_PULL_PU_PD_TYPE, /* 20 */ MTK_PULL_PU_PD_TYPE, /* 21 */
+ MTK_PULL_PU_PD_TYPE, /* 22 */ MTK_PULL_PU_PD_TYPE, /* 23 */
+ MTK_PULL_PU_PD_RSEL_TYPE, /* 24 */ MTK_PULL_PU_PD_RSEL_TYPE, /* 25 */
+ MTK_PULL_PU_PD_TYPE, /* 26 */ MTK_PULL_PU_PD_TYPE, /* 27 */
+ MTK_PULL_PU_PD_TYPE, /* 28 */ MTK_PULL_PU_PD_TYPE, /* 29 */
+ MTK_PULL_PU_PD_TYPE, /* 30 */ MTK_PULL_PU_PD_TYPE, /* 31 */
+ MTK_PULL_PU_PD_TYPE, /* 32 */ MTK_PULL_PU_PD_TYPE, /* 33 */
+ MTK_PULL_PU_PD_TYPE, /* 34 */ MTK_PULL_PU_PD_TYPE, /* 35 */
+ MTK_PULL_PU_PD_TYPE, /* 36 */ MTK_PULL_PU_PD_TYPE, /* 37 */
+ MTK_PULL_PU_PD_TYPE, /* 38 */ MTK_PULL_PU_PD_TYPE, /* 39 */
+ MTK_PULL_PU_PD_TYPE, /* 40 */ MTK_PULL_PU_PD_TYPE, /* 41 */
+ MTK_PULL_PU_PD_TYPE, /* 42 */ MTK_PULL_PU_PD_TYPE, /* 43 */
+ MTK_PULL_PU_PD_TYPE, /* 44 */ MTK_PULL_PUPD_R1R0_TYPE, /* 45 */
+ MTK_PULL_PUPD_R1R0_TYPE, /* 46 */ MTK_PULL_PUPD_R1R0_TYPE, /* 47 */
+ MTK_PULL_PUPD_R1R0_TYPE, /* 48 */ MTK_PULL_PUPD_R1R0_TYPE, /* 49 */
+ MTK_PULL_PUPD_R1R0_TYPE, /* 50 */ MTK_PULL_PUPD_R1R0_TYPE, /* 51 */
+ MTK_PULL_PUPD_R1R0_TYPE, /* 52 */ MTK_PULL_PUPD_R1R0_TYPE, /* 53 */
+ MTK_PULL_PUPD_R1R0_TYPE, /* 54 */ MTK_PULL_PUPD_R1R0_TYPE, /* 55 */
+ MTK_PULL_PUPD_R1R0_TYPE, /* 56 */ MTK_PULL_PU_PD_TYPE, /* 57 */
+ MTK_PULL_PU_PD_TYPE, /* 58 */ MTK_PULL_PU_PD_TYPE, /* 59 */
+ MTK_PULL_PU_PD_TYPE, /* 60 */ MTK_PULL_PU_PD_TYPE, /* 61 */
+ MTK_PULL_PU_PD_TYPE, /* 62 */ MTK_PULL_PU_PD_TYPE, /* 63 */
+ MTK_PULL_PU_PD_TYPE, /* 64 */ MTK_PULL_PU_PD_TYPE, /* 65 */
+ MTK_PULL_PU_PD_TYPE, /* 66 */ MTK_PULL_PU_PD_TYPE, /* 67 */
+ MTK_PULL_PU_PD_TYPE, /* 68 */ MTK_PULL_PU_PD_TYPE, /* 69 */
+ MTK_PULL_PU_PD_TYPE, /* 70 */ MTK_PULL_PU_PD_TYPE, /* 71 */
+ MTK_PULL_PU_PD_TYPE, /* 72 */ MTK_PULL_PU_PD_TYPE, /* 73 */
+ MTK_PULL_PU_PD_TYPE, /* 74 */ MTK_PULL_PU_PD_TYPE, /* 75 */
+ MTK_PULL_PU_PD_TYPE, /* 76 */ MTK_PULL_PU_PD_TYPE, /* 77 */
+ MTK_PULL_PU_PD_TYPE, /* 78 */ MTK_PULL_PU_PD_TYPE, /* 79 */
+ MTK_PULL_PU_PD_TYPE, /* 80 */ MTK_PULL_PU_PD_TYPE, /* 81 */
+ MTK_PULL_PU_PD_TYPE, /* 82 */ MTK_PULL_PU_PD_TYPE, /* 83 */
+ MTK_PULL_PU_PD_TYPE, /* 84 */ MTK_PULL_PU_PD_TYPE, /* 85 */
+ MTK_PULL_PU_PD_TYPE, /* 86 */ MTK_PULL_PU_PD_TYPE, /* 87 */
+ MTK_PULL_PU_PD_TYPE, /* 88 */ MTK_PULL_PU_PD_TYPE, /* 89 */
+ MTK_PULL_PU_PD_TYPE, /* 90 */ MTK_PULL_PU_PD_TYPE, /* 91 */
+ MTK_PULL_PU_PD_TYPE, /* 92 */ MTK_PULL_PU_PD_TYPE, /* 93 */
+ MTK_PULL_PU_PD_TYPE, /* 94 */ MTK_PULL_PU_PD_TYPE, /* 95 */
+ MTK_PULL_PU_PD_TYPE, /* 96 */ MTK_PULL_PU_PD_TYPE, /* 97 */
+ MTK_PULL_PU_PD_TYPE, /* 98 */ MTK_PULL_PU_PD_TYPE, /* 99 */
+ MTK_PULL_PU_PD_TYPE, /* 100 */ MTK_PULL_PU_PD_TYPE, /* 101 */
+ MTK_PULL_PU_PD_TYPE, /* 102 */ MTK_PULL_PU_PD_TYPE, /* 103 */
+ MTK_PULL_PU_PD_TYPE, /* 104 */ MTK_PULL_PU_PD_TYPE, /* 105 */
+ MTK_PULL_PU_PD_TYPE, /* 106 */ MTK_PULL_PU_PD_TYPE, /* 107 */
+ MTK_PULL_PU_PD_TYPE, /* 108 */ MTK_PULL_PU_PD_TYPE, /* 109 */
+ MTK_PULL_PU_PD_TYPE, /* 110 */ MTK_PULL_PU_PD_TYPE, /* 111 */
+ MTK_PULL_PU_PD_TYPE, /* 112 */ MTK_PULL_PU_PD_TYPE, /* 113 */
+ MTK_PULL_PU_PD_TYPE, /* 114 */ MTK_PULL_PU_PD_TYPE, /* 115 */
+ MTK_PULL_PU_PD_TYPE, /* 116 */ MTK_PULL_PU_PD_TYPE, /* 117 */
+ MTK_PULL_PU_PD_RSEL_TYPE, /* 118 */ MTK_PULL_PU_PD_RSEL_TYPE, /* 119 */
+ MTK_PULL_PU_PD_RSEL_TYPE, /* 120 */ MTK_PULL_PU_PD_RSEL_TYPE, /* 121 */
+ MTK_PULL_PU_PD_RSEL_TYPE, /* 122 */ MTK_PULL_PU_PD_RSEL_TYPE, /* 123 */
+ MTK_PULL_PU_PD_RSEL_TYPE, /* 124 */ MTK_PULL_PU_PD_RSEL_TYPE, /* 125 */
+ MTK_PULL_PU_PD_TYPE, /* 126 */ MTK_PULL_PU_PD_TYPE, /* 127 */
+ MTK_PULL_PU_PD_TYPE, /* 128 */ MTK_PULL_PU_PD_TYPE, /* 129 */
+ MTK_PULL_PU_PD_TYPE, /* 130 */ MTK_PULL_PU_PD_TYPE, /* 131 */
+ MTK_PULL_PU_PD_TYPE, /* 132 */ MTK_PULL_PU_PD_TYPE, /* 133 */
+ MTK_PULL_PU_PD_TYPE, /* 134 */ MTK_PULL_PU_PD_TYPE, /* 135 */
+ MTK_PULL_PU_PD_TYPE, /* 136 */ MTK_PULL_PU_PD_TYPE, /* 137 */
+ MTK_PULL_PU_PD_TYPE, /* 138 */ MTK_PULL_PU_PD_TYPE, /* 139 */
+ MTK_PULL_PU_PD_RSEL_TYPE, /* 140 */ MTK_PULL_PU_PD_RSEL_TYPE, /* 141 */
+ MTK_PULL_PU_PD_RSEL_TYPE, /* 142 */ MTK_PULL_PU_PD_TYPE, /* 143 */
+ MTK_PULL_PU_PD_TYPE, /* 144 */ MTK_PULL_PU_PD_TYPE, /* 145 */
+ MTK_PULL_PU_PD_TYPE, /* 146 */ MTK_PULL_PU_PD_TYPE, /* 147 */
+ MTK_PULL_PU_PD_TYPE, /* 148 */ MTK_PULL_PU_PD_TYPE, /* 149 */
+ MTK_PULL_PU_PD_TYPE, /* 150 */ MTK_PULL_PU_PD_TYPE, /* 151 */
+ MTK_PULL_PUPD_R1R0_TYPE, /* 152 */ MTK_PULL_PUPD_R1R0_TYPE, /* 153 */
+ MTK_PULL_PUPD_R1R0_TYPE, /* 154 */ MTK_PULL_PUPD_R1R0_TYPE, /* 155 */
+ MTK_PULL_PU_PD_TYPE, /* 156 */ MTK_PULL_PU_PD_TYPE, /* 157 */
+ MTK_PULL_PU_PD_TYPE, /* 158 */ MTK_PULL_PU_PD_TYPE, /* 159 */
+ MTK_PULL_PU_PD_RSEL_TYPE, /* 160 */ MTK_PULL_PU_PD_RSEL_TYPE, /* 161 */
+ MTK_PULL_PU_PD_TYPE, /* 162 */ MTK_PULL_PU_PD_TYPE, /* 163 */
+ MTK_PULL_PU_PD_TYPE, /* 164 */ MTK_PULL_PU_PD_TYPE, /* 165 */
+ MTK_PULL_PU_PD_TYPE, /* 166 */ MTK_PULL_PU_PD_TYPE, /* 167 */
+ MTK_PULL_PU_PD_TYPE, /* 168 */ MTK_PULL_PU_PD_TYPE, /* 169 */
+ MTK_PULL_PU_PD_TYPE, /* 170 */ MTK_PULL_PU_PD_TYPE, /* 171 */
+ MTK_PULL_PU_PD_TYPE, /* 172 */ MTK_PULL_PU_PD_TYPE, /* 173 */
+ MTK_PULL_PU_PD_TYPE, /* 174 */ MTK_PULL_PU_PD_TYPE, /* 175 */
+ MTK_PULL_PU_PD_TYPE, /* 176 */ MTK_PULL_PU_PD_TYPE, /* 177 */
+ MTK_PULL_PU_PD_TYPE, /* 178 */ MTK_PULL_PU_PD_TYPE, /* 179 */
+ MTK_PULL_PU_PD_TYPE, /* 180 */ MTK_PULL_PU_PD_TYPE, /* 181 */
+ MTK_PULL_PU_PD_TYPE, /* 182 */ MTK_PULL_PUPD_R1R0_TYPE, /* 183 */
+ MTK_PULL_PUPD_R1R0_TYPE, /* 184 */ MTK_PULL_PUPD_R1R0_TYPE, /* 185 */
+ MTK_PULL_PUPD_R1R0_TYPE, /* 186 */ MTK_PULL_PUPD_R1R0_TYPE, /* 187 */
+ MTK_PULL_PUPD_R1R0_TYPE, /* 188 */ MTK_PULL_PUPD_R1R0_TYPE, /* 189 */
+ MTK_PULL_PUPD_R1R0_TYPE, /* 190 */ MTK_PULL_PUPD_R1R0_TYPE, /* 191 */
+ MTK_PULL_PUPD_R1R0_TYPE, /* 192 */ MTK_PULL_PUPD_R1R0_TYPE, /* 193 */
+ MTK_PULL_PUPD_R1R0_TYPE, /* 194 */ MTK_PULL_PU_PD_TYPE, /* 195 */
+ MTK_PULL_PU_PD_TYPE, /* 196 */ MTK_PULL_PU_PD_TYPE, /* 197 */
+ MTK_PULL_PU_PD_TYPE, /* 198 */ MTK_PULL_PU_PD_TYPE, /* 199 */
+ MTK_PULL_PU_PD_RSEL_TYPE, /* 200 */ MTK_PULL_PU_PD_RSEL_TYPE, /* 201 */
+ MTK_PULL_PU_PD_RSEL_TYPE, /* 202 */ MTK_PULL_PU_PD_RSEL_TYPE, /* 203 */
+ MTK_PULL_PU_PD_RSEL_TYPE, /* 204 */ MTK_PULL_PU_PD_RSEL_TYPE, /* 205 */
+ MTK_PULL_PU_PD_TYPE, /* 206 */ MTK_PULL_PU_PD_TYPE, /* 207 */
+ MTK_PULL_PU_PD_TYPE, /* 208 */ MTK_PULL_PU_PD_TYPE, /* 209 */
+ MTK_PULL_PU_PD_TYPE, /* 210 */ MTK_PULL_PU_PD_TYPE, /* 211 */
+ MTK_PULL_PU_PD_TYPE, /* 212 */ MTK_PULL_PU_PD_TYPE, /* 213 */
+ MTK_PULL_PU_PD_TYPE, /* 214 */ MTK_PULL_PU_PD_TYPE, /* 215 */
+ MTK_PULL_PU_PD_TYPE, /* 216 */ MTK_PULL_PU_PD_TYPE, /* 217 */
+ MTK_PULL_PU_PD_TYPE, /* 218 */ MTK_PULL_PU_PD_TYPE, /* 219 */
+};
+
+static const char * const mt6893_pinctrl_register_base_name[] = {
+ "base", "rm", "bm", "bl", "br", "lm", "lb", "rt", "lt", "tl",
+};
+
+static const struct mtk_pin_reg_calc mt6893_reg_cals[PINCTRL_PIN_REG_MAX] = {
+ [PINCTRL_PIN_REG_MODE] = MTK_RANGE(mt6893_pin_mode_range),
+ [PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt6893_pin_dir_range),
+ [PINCTRL_PIN_REG_DI] = MTK_RANGE(mt6893_pin_di_range),
+ [PINCTRL_PIN_REG_DO] = MTK_RANGE(mt6893_pin_do_range),
+ [PINCTRL_PIN_REG_SR] = MTK_RANGE(mt6893_pin_dir_range),
+ [PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt6893_pin_smt_range),
+ [PINCTRL_PIN_REG_IES] = MTK_RANGE(mt6893_pin_ies_range),
+ [PINCTRL_PIN_REG_PU] = MTK_RANGE(mt6893_pin_pu_range),
+ [PINCTRL_PIN_REG_PD] = MTK_RANGE(mt6893_pin_pd_range),
+ [PINCTRL_PIN_REG_DRV] = MTK_RANGE(mt6893_pin_drv_range),
+ [PINCTRL_PIN_REG_PUPD] = MTK_RANGE(mt6893_pin_pupd_range),
+ [PINCTRL_PIN_REG_R0] = MTK_RANGE(mt6893_pin_r0_range),
+ [PINCTRL_PIN_REG_R1] = MTK_RANGE(mt6893_pin_r1_range),
+ [PINCTRL_PIN_REG_DRV_ADV] = MTK_RANGE(mt6893_pin_drv_adv_range),
+ [PINCTRL_PIN_REG_RSEL] = MTK_RANGE(mt6893_pin_rsel_range),
+};
+
+static const struct mtk_eint_hw mt6893_eint_hw = {
+ .port_mask = 7,
+ .ports = 7,
+ .ap_num = 224,
+ .db_cnt = 32,
+ .db_time = debounce_time_mt6765,
+};
+
+static const struct mtk_pin_soc mt6893_data = {
+ .reg_cal = mt6893_reg_cals,
+ .pins = mtk_pins_mt6893,
+ .npins = ARRAY_SIZE(mtk_pins_mt6893),
+ .ngrps = ARRAY_SIZE(mtk_pins_mt6893),
+ .eint_hw = &mt6893_eint_hw,
+ .nfuncs = 8,
+ .gpio_m = 0,
+ .base_names = mt6893_pinctrl_register_base_name,
+ .nbase_names = ARRAY_SIZE(mt6893_pinctrl_register_base_name),
+ .pull_type = mt6893_pull_type,
+ .bias_set_combo = mtk_pinconf_bias_set_combo,
+ .bias_get_combo = mtk_pinconf_bias_get_combo,
+ .drive_set = mtk_pinconf_drive_set_rev1,
+ .drive_get = mtk_pinconf_drive_get_rev1,
+ .adv_drive_set = mtk_pinconf_adv_drive_set_raw,
+ .adv_drive_get = mtk_pinconf_adv_drive_get_raw,
+};
+
+static const struct of_device_id mt6893_pinctrl_of_match[] = {
+ { .compatible = "mediatek,mt6893-pinctrl", .data = &mt6893_data },
+ { /* sentinel */ }
+};
+
+static struct platform_driver mt6893_pinctrl_driver = {
+ .driver = {
+ .name = "mt6893-pinctrl",
+ .of_match_table = mt6893_pinctrl_of_match,
+ .pm = pm_sleep_ptr(&mtk_paris_pinctrl_pm_ops)
+ },
+ .probe = mtk_paris_pinctrl_probe,
+};
+
+static int __init mt6893_pinctrl_init(void)
+{
+ return platform_driver_register(&mt6893_pinctrl_driver);
+}
+
+arch_initcall(mt6893_pinctrl_init);
+
+MODULE_DESCRIPTION("MediaTek MT6893 Pinctrl Driver");
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8196.c b/drivers/pinctrl/mediatek/pinctrl-mt8196.c
new file mode 100644
index 000000000000..82a73929c7a0
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8196.c
@@ -0,0 +1,1860 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 MediaTek Inc.
+ * Author: Guodong Liu <Guodong.Liu@mediatek.com>
+ * Lei Xue <lei.xue@mediatek.com>
+ * Cathy Xu <ot_cathy.xu@mediatek.com>
+ */
+
+#include <linux/module.h>
+#include "pinctrl-mtk-mt8196.h"
+#include "pinctrl-paris.h"
+
+#define PIN_FIELD_BASE(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits) \
+ PIN_FIELD_CALC(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits, \
+ 32, 0)
+
+#define PINS_FIELD_BASE(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits) \
+ PIN_FIELD_CALC(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits, \
+ 32, 1)
+
+static const struct mtk_pin_field_calc mt8196_pin_mode_range[] = {
+ PIN_FIELD(0, 270, 0x0300, 0x10, 0, 4),
+};
+
+static const struct mtk_pin_field_calc mt8196_pin_dir_range[] = {
+ PIN_FIELD(0, 270, 0x0000, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt8196_pin_di_range[] = {
+ PIN_FIELD(0, 270, 0x0200, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt8196_pin_do_range[] = {
+ PIN_FIELD(0, 270, 0x0100, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt8196_pin_smt_range[] = {
+ PIN_FIELD_BASE(0, 0, 8, 0x00d0, 0x10, 0, 1),
+ PIN_FIELD_BASE(1, 1, 8, 0x00d0, 0x10, 1, 1),
+ PIN_FIELD_BASE(2, 2, 11, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(3, 3, 11, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(4, 4, 11, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(5, 5, 11, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(6, 6, 11, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(7, 7, 11, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(8, 8, 11, 0x00a0, 0x10, 3, 1),
+ PIN_FIELD_BASE(9, 9, 9, 0x0120, 0x10, 13, 1),
+ PIN_FIELD_BASE(10, 10, 9, 0x0120, 0x10, 12, 1),
+ PIN_FIELD_BASE(11, 11, 8, 0x00d0, 0x10, 2, 1),
+ PIN_FIELD_BASE(12, 12, 9, 0x0120, 0x10, 15, 1),
+ PIN_FIELD_BASE(13, 13, 6, 0x0120, 0x10, 3, 1),
+ PIN_FIELD_BASE(14, 14, 3, 0x00c0, 0x10, 0, 1),
+ PIN_FIELD_BASE(15, 15, 6, 0x0120, 0x10, 0, 1),
+ PIN_FIELD_BASE(16, 16, 6, 0x0120, 0x10, 3, 1),
+ PIN_FIELD_BASE(17, 17, 6, 0x0120, 0x10, 3, 1),
+ PIN_FIELD_BASE(18, 18, 6, 0x0120, 0x10, 1, 1),
+ PIN_FIELD_BASE(19, 19, 6, 0x0120, 0x10, 2, 1),
+ PIN_FIELD_BASE(20, 20, 3, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(21, 21, 2, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(22, 22, 2, 0x00b0, 0x10, 1, 1),
+ PIN_FIELD_BASE(23, 23, 2, 0x00b0, 0x10, 2, 1),
+ PIN_FIELD_BASE(24, 24, 2, 0x00b0, 0x10, 3, 1),
+ PIN_FIELD_BASE(25, 25, 2, 0x00b0, 0x10, 4, 1),
+ PIN_FIELD_BASE(26, 26, 2, 0x00b0, 0x10, 5, 1),
+ PIN_FIELD_BASE(27, 27, 2, 0x00b0, 0x10, 7, 1),
+ PIN_FIELD_BASE(28, 28, 2, 0x00b0, 0x10, 7, 1),
+ PIN_FIELD_BASE(29, 29, 2, 0x00b0, 0x10, 7, 1),
+ PIN_FIELD_BASE(30, 30, 2, 0x00b0, 0x10, 8, 1),
+ PIN_FIELD_BASE(31, 31, 2, 0x00b0, 0x10, 8, 1),
+ PIN_FIELD_BASE(32, 32, 1, 0x00c0, 0x10, 8, 1),
+ PIN_FIELD_BASE(33, 33, 1, 0x00c0, 0x10, 8, 1),
+ PIN_FIELD_BASE(34, 34, 1, 0x00c0, 0x10, 8, 1),
+ PIN_FIELD_BASE(35, 35, 1, 0x00c0, 0x10, 8, 1),
+ PIN_FIELD_BASE(36, 36, 1, 0x00c0, 0x10, 9, 1),
+ PIN_FIELD_BASE(37, 37, 1, 0x00c0, 0x10, 9, 1),
+ PIN_FIELD_BASE(38, 38, 1, 0x00c0, 0x10, 6, 1),
+ PIN_FIELD_BASE(39, 39, 8, 0x00d0, 0x10, 4, 1),
+ PIN_FIELD_BASE(40, 40, 8, 0x00d0, 0x10, 3, 1),
+ PIN_FIELD_BASE(41, 41, 8, 0x00d0, 0x10, 3, 1),
+ PIN_FIELD_BASE(42, 42, 8, 0x00d0, 0x10, 3, 1),
+ PIN_FIELD_BASE(43, 43, 8, 0x00d0, 0x10, 3, 1),
+ PIN_FIELD_BASE(44, 44, 8, 0x00d0, 0x10, 5, 1),
+ PIN_FIELD_BASE(45, 45, 8, 0x00d0, 0x10, 5, 1),
+ PIN_FIELD_BASE(46, 46, 8, 0x00d0, 0x10, 6, 1),
+ PIN_FIELD_BASE(47, 47, 8, 0x00d0, 0x10, 9, 1),
+ PIN_FIELD_BASE(48, 48, 8, 0x00d0, 0x10, 7, 1),
+ PIN_FIELD_BASE(49, 49, 8, 0x00d0, 0x10, 10, 1),
+ PIN_FIELD_BASE(50, 50, 8, 0x00d0, 0x10, 8, 1),
+ PIN_FIELD_BASE(51, 51, 8, 0x00d0, 0x10, 11, 1),
+ PIN_FIELD_BASE(52, 52, 9, 0x0120, 0x10, 7, 1),
+ PIN_FIELD_BASE(53, 53, 9, 0x0120, 0x10, 8, 1),
+ PIN_FIELD_BASE(54, 54, 9, 0x0120, 0x10, 2, 1),
+ PIN_FIELD_BASE(55, 55, 9, 0x0120, 0x10, 1, 1),
+ PIN_FIELD_BASE(56, 56, 9, 0x0120, 0x10, 5, 1),
+ PIN_FIELD_BASE(57, 57, 9, 0x0120, 0x10, 6, 1),
+ PIN_FIELD_BASE(58, 58, 9, 0x0120, 0x10, 3, 1),
+ PIN_FIELD_BASE(59, 59, 9, 0x0120, 0x10, 4, 1),
+ PIN_FIELD_BASE(60, 60, 9, 0x0120, 0x10, 19, 1),
+ PIN_FIELD_BASE(61, 61, 9, 0x0120, 0x10, 10, 1),
+ PIN_FIELD_BASE(62, 62, 9, 0x0120, 0x10, 9, 1),
+ PIN_FIELD_BASE(63, 63, 9, 0x0120, 0x10, 14, 1),
+ PIN_FIELD_BASE(64, 64, 9, 0x0120, 0x10, 0, 1),
+ PIN_FIELD_BASE(65, 65, 9, 0x0120, 0x10, 11, 1),
+ PIN_FIELD_BASE(66, 66, 9, 0x0120, 0x10, 16, 1),
+ PIN_FIELD_BASE(67, 67, 9, 0x0120, 0x10, 18, 1),
+ PIN_FIELD_BASE(68, 68, 9, 0x0120, 0x10, 18, 1),
+ PIN_FIELD_BASE(69, 69, 9, 0x0120, 0x10, 18, 1),
+ PIN_FIELD_BASE(70, 70, 9, 0x0120, 0x10, 17, 1),
+ PIN_FIELD_BASE(71, 71, 9, 0x0120, 0x10, 17, 1),
+ PIN_FIELD_BASE(72, 72, 9, 0x0120, 0x10, 18, 1),
+ PIN_FIELD_BASE(73, 73, 9, 0x0120, 0x10, 17, 1),
+ PIN_FIELD_BASE(74, 74, 9, 0x0120, 0x10, 17, 1),
+ PIN_FIELD_BASE(75, 75, 10, 0x00b0, 0x10, 2, 1),
+ PIN_FIELD_BASE(76, 76, 10, 0x00b0, 0x10, 3, 1),
+ PIN_FIELD_BASE(77, 77, 10, 0x00b0, 0x10, 4, 1),
+ PIN_FIELD_BASE(78, 78, 10, 0x00b0, 0x10, 5, 1),
+ PIN_FIELD_BASE(79, 79, 10, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(80, 80, 10, 0x00b0, 0x10, 1, 1),
+ PIN_FIELD_BASE(81, 81, 11, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(82, 82, 11, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(83, 83, 11, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(84, 84, 11, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(85, 85, 11, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(86, 86, 11, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(87, 87, 11, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(88, 88, 11, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(89, 89, 11, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(90, 90, 11, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(91, 91, 12, 0x00c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(92, 92, 12, 0x00c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(93, 93, 12, 0x00c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(94, 94, 12, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(95, 95, 12, 0x00c0, 0x10, 0, 1),
+ PIN_FIELD_BASE(96, 96, 12, 0x00c0, 0x10, 2, 1),
+ PIN_FIELD_BASE(97, 97, 12, 0x00c0, 0x10, 1, 1),
+ PIN_FIELD_BASE(98, 98, 12, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(99, 99, 12, 0x00c0, 0x10, 6, 1),
+ PIN_FIELD_BASE(100, 100, 12, 0x00c0, 0x10, 9, 1),
+ PIN_FIELD_BASE(101, 101, 12, 0x00c0, 0x10, 7, 1),
+ PIN_FIELD_BASE(102, 102, 12, 0x00c0, 0x10, 10, 1),
+ PIN_FIELD_BASE(103, 103, 12, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(104, 104, 12, 0x00c0, 0x10, 8, 1),
+ PIN_FIELD_BASE(105, 105, 12, 0x00c0, 0x10, 11, 1),
+ PIN_FIELD_BASE(106, 106, 5, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(107, 107, 5, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(108, 108, 5, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(109, 109, 5, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(110, 110, 5, 0x00b0, 0x10, 1, 1),
+ PIN_FIELD_BASE(111, 111, 5, 0x00b0, 0x10, 1, 1),
+ PIN_FIELD_BASE(112, 112, 5, 0x00b0, 0x10, 1, 1),
+ PIN_FIELD_BASE(113, 113, 5, 0x00b0, 0x10, 1, 1),
+ PIN_FIELD_BASE(114, 114, 5, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(115, 115, 5, 0x00b0, 0x10, 2, 1),
+ PIN_FIELD_BASE(116, 116, 5, 0x00b0, 0x10, 2, 1),
+ PIN_FIELD_BASE(117, 117, 5, 0x00b0, 0x10, 2, 1),
+ PIN_FIELD_BASE(118, 118, 6, 0x0120, 0x10, 6, 1),
+ PIN_FIELD_BASE(119, 119, 6, 0x0120, 0x10, 7, 1),
+ PIN_FIELD_BASE(120, 120, 6, 0x0120, 0x10, 9, 1),
+ PIN_FIELD_BASE(121, 121, 6, 0x0120, 0x10, 8, 1),
+ PIN_FIELD_BASE(122, 122, 6, 0x0120, 0x10, 3, 1),
+ PIN_FIELD_BASE(123, 123, 6, 0x0120, 0x10, 4, 1),
+ PIN_FIELD_BASE(124, 124, 6, 0x0120, 0x10, 5, 1),
+ PIN_FIELD_BASE(125, 125, 7, 0x00f0, 0x10, 0, 1),
+ PIN_FIELD_BASE(126, 126, 7, 0x00f0, 0x10, 1, 1),
+ PIN_FIELD_BASE(127, 127, 7, 0x00f0, 0x10, 2, 1),
+ PIN_FIELD_BASE(128, 128, 7, 0x00f0, 0x10, 3, 1),
+ PIN_FIELD_BASE(129, 129, 7, 0x00f0, 0x10, 4, 1),
+ PIN_FIELD_BASE(130, 130, 7, 0x00f0, 0x10, 5, 1),
+ PIN_FIELD_BASE(131, 131, 7, 0x00f0, 0x10, 9, 1),
+ PIN_FIELD_BASE(132, 132, 7, 0x00f0, 0x10, 11, 1),
+ PIN_FIELD_BASE(133, 133, 7, 0x00f0, 0x10, 10, 1),
+ PIN_FIELD_BASE(134, 134, 7, 0x00f0, 0x10, 6, 1),
+ PIN_FIELD_BASE(135, 135, 7, 0x00f0, 0x10, 8, 1),
+ PIN_FIELD_BASE(136, 136, 7, 0x00f0, 0x10, 7, 1),
+ PIN_FIELD_BASE(137, 137, 4, 0x00d0, 0x10, 2, 1),
+ PIN_FIELD_BASE(138, 138, 4, 0x00d0, 0x10, 3, 1),
+ PIN_FIELD_BASE(139, 139, 4, 0x00d0, 0x10, 4, 1),
+ PIN_FIELD_BASE(140, 140, 4, 0x00d0, 0x10, 5, 1),
+ PIN_FIELD_BASE(141, 141, 4, 0x00d0, 0x10, 6, 1),
+ PIN_FIELD_BASE(142, 142, 4, 0x00d0, 0x10, 7, 1),
+ PIN_FIELD_BASE(143, 143, 4, 0x00d0, 0x10, 8, 1),
+ PIN_FIELD_BASE(144, 144, 4, 0x00d0, 0x10, 9, 1),
+ PIN_FIELD_BASE(145, 145, 4, 0x00d0, 0x10, 10, 1),
+ PIN_FIELD_BASE(146, 146, 4, 0x00d0, 0x10, 10, 1),
+ PIN_FIELD_BASE(147, 147, 4, 0x00d0, 0x10, 0, 1),
+ PIN_FIELD_BASE(148, 148, 4, 0x00d0, 0x10, 1, 1),
+ PIN_FIELD_BASE(149, 149, 4, 0x00d0, 0x10, 11, 1),
+ PIN_FIELD_BASE(150, 150, 4, 0x00d0, 0x10, 11, 1),
+ PIN_FIELD_BASE(151, 151, 4, 0x00d0, 0x10, 11, 1),
+ PIN_FIELD_BASE(152, 152, 4, 0x00d0, 0x10, 11, 1),
+ PIN_FIELD_BASE(153, 153, 4, 0x00d0, 0x10, 13, 1),
+ PIN_FIELD_BASE(154, 154, 4, 0x00d0, 0x10, 13, 1),
+ PIN_FIELD_BASE(155, 155, 4, 0x00d0, 0x10, 12, 1),
+ PIN_FIELD_BASE(156, 156, 4, 0x00d0, 0x10, 12, 1),
+ PIN_FIELD_BASE(157, 157, 2, 0x00b0, 0x10, 6, 1),
+ PIN_FIELD_BASE(158, 158, 2, 0x00b0, 0x10, 6, 1),
+ PIN_FIELD_BASE(159, 159, 2, 0x00b0, 0x10, 6, 1),
+ PIN_FIELD_BASE(160, 160, 3, 0x00c0, 0x10, 1, 1),
+ PIN_FIELD_BASE(161, 161, 3, 0x00c0, 0x10, 2, 1),
+ PIN_FIELD_BASE(162, 162, 3, 0x00c0, 0x10, 2, 1),
+ PIN_FIELD_BASE(163, 163, 3, 0x00c0, 0x10, 2, 1),
+ PIN_FIELD_BASE(164, 164, 3, 0x00c0, 0x10, 7, 1),
+ PIN_FIELD_BASE(165, 165, 3, 0x00c0, 0x10, 9, 1),
+ PIN_FIELD_BASE(166, 166, 3, 0x00c0, 0x10, 8, 1),
+ PIN_FIELD_BASE(167, 167, 3, 0x00c0, 0x10, 10, 1),
+ PIN_FIELD_BASE(168, 168, 3, 0x00c0, 0x10, 11, 1),
+ PIN_FIELD_BASE(169, 169, 3, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(170, 170, 3, 0x00c0, 0x10, 12, 1),
+ PIN_FIELD_BASE(171, 171, 3, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(172, 172, 3, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(173, 173, 3, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(174, 174, 1, 0x00c0, 0x10, 9, 1),
+ PIN_FIELD_BASE(175, 175, 1, 0x00c0, 0x10, 9, 1),
+ PIN_FIELD_BASE(176, 176, 1, 0x00c0, 0x10, 10, 1),
+ PIN_FIELD_BASE(177, 177, 1, 0x00c0, 0x10, 11, 1),
+ PIN_FIELD_BASE(178, 178, 1, 0x00c0, 0x10, 7, 1),
+ PIN_FIELD_BASE(179, 179, 1, 0x00c0, 0x10, 7, 1),
+ PIN_FIELD_BASE(180, 180, 1, 0x00c0, 0x10, 0, 1),
+ PIN_FIELD_BASE(181, 181, 1, 0x00c0, 0x10, 1, 1),
+ PIN_FIELD_BASE(182, 182, 1, 0x00c0, 0x10, 2, 1),
+ PIN_FIELD_BASE(183, 183, 1, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(184, 184, 1, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(185, 185, 1, 0x00c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(186, 186, 13, 0x0110, 0x10, 14, 1),
+ PIN_FIELD_BASE(187, 187, 13, 0x0110, 0x10, 14, 1),
+ PIN_FIELD_BASE(188, 188, 13, 0x0110, 0x10, 4, 1),
+ PIN_FIELD_BASE(189, 189, 13, 0x0110, 0x10, 9, 1),
+ PIN_FIELD_BASE(190, 190, 13, 0x0110, 0x10, 5, 1),
+ PIN_FIELD_BASE(191, 191, 13, 0x0110, 0x10, 10, 1),
+ PIN_FIELD_BASE(192, 192, 13, 0x0110, 0x10, 0, 1),
+ PIN_FIELD_BASE(193, 193, 13, 0x0110, 0x10, 15, 1),
+ PIN_FIELD_BASE(194, 194, 13, 0x0110, 0x10, 6, 1),
+ PIN_FIELD_BASE(195, 195, 13, 0x0110, 0x10, 11, 1),
+ PIN_FIELD_BASE(196, 196, 13, 0x0110, 0x10, 1, 1),
+ PIN_FIELD_BASE(197, 197, 13, 0x0110, 0x10, 16, 1),
+ PIN_FIELD_BASE(198, 198, 13, 0x0110, 0x10, 7, 1),
+ PIN_FIELD_BASE(199, 199, 13, 0x0110, 0x10, 12, 1),
+ PIN_FIELD_BASE(200, 200, 13, 0x0110, 0x10, 19, 1),
+ PIN_FIELD_BASE(201, 201, 13, 0x0110, 0x10, 22, 1),
+ PIN_FIELD_BASE(202, 202, 13, 0x0110, 0x10, 8, 1),
+ PIN_FIELD_BASE(203, 203, 13, 0x0110, 0x10, 13, 1),
+ PIN_FIELD_BASE(204, 204, 13, 0x0110, 0x10, 2, 1),
+ PIN_FIELD_BASE(205, 205, 13, 0x0110, 0x10, 3, 1),
+ PIN_FIELD_BASE(206, 206, 13, 0x0110, 0x10, 18, 1),
+ PIN_FIELD_BASE(207, 207, 13, 0x0110, 0x10, 17, 1),
+ PIN_FIELD_BASE(208, 208, 13, 0x0110, 0x10, 17, 1),
+ PIN_FIELD_BASE(209, 209, 13, 0x0110, 0x10, 17, 1),
+ PIN_FIELD_BASE(210, 210, 14, 0x0130, 0x10, 0, 1),
+ PIN_FIELD_BASE(211, 211, 14, 0x0130, 0x10, 1, 1),
+ PIN_FIELD_BASE(212, 212, 14, 0x0130, 0x10, 2, 1),
+ PIN_FIELD_BASE(213, 213, 14, 0x0130, 0x10, 3, 1),
+ PIN_FIELD_BASE(214, 214, 13, 0x0110, 0x10, 20, 1),
+ PIN_FIELD_BASE(215, 215, 13, 0x0110, 0x10, 21, 1),
+ PIN_FIELD_BASE(216, 216, 14, 0x0130, 0x10, 11, 1),
+ PIN_FIELD_BASE(217, 217, 14, 0x0130, 0x10, 11, 1),
+ PIN_FIELD_BASE(218, 218, 14, 0x0130, 0x10, 11, 1),
+ PIN_FIELD_BASE(219, 219, 14, 0x0130, 0x10, 4, 1),
+ PIN_FIELD_BASE(220, 220, 14, 0x0130, 0x10, 11, 1),
+ PIN_FIELD_BASE(221, 221, 14, 0x0130, 0x10, 12, 1),
+ PIN_FIELD_BASE(222, 222, 14, 0x0130, 0x10, 22, 1),
+ PIN_FIELD_BASE(223, 223, 14, 0x0130, 0x10, 21, 1),
+ PIN_FIELD_BASE(224, 224, 14, 0x0130, 0x10, 5, 1),
+ PIN_FIELD_BASE(225, 225, 14, 0x0130, 0x10, 6, 1),
+ PIN_FIELD_BASE(226, 226, 14, 0x0130, 0x10, 7, 1),
+ PIN_FIELD_BASE(227, 227, 14, 0x0130, 0x10, 8, 1),
+ PIN_FIELD_BASE(228, 228, 14, 0x0130, 0x10, 9, 1),
+ PIN_FIELD_BASE(229, 229, 14, 0x0130, 0x10, 10, 1),
+ PIN_FIELD_BASE(230, 230, 15, 0x00e0, 0x10, 0, 1),
+ PIN_FIELD_BASE(231, 231, 15, 0x00e0, 0x10, 0, 1),
+ PIN_FIELD_BASE(232, 232, 15, 0x00e0, 0x10, 0, 1),
+ PIN_FIELD_BASE(233, 233, 15, 0x00e0, 0x10, 1, 1),
+ PIN_FIELD_BASE(234, 234, 15, 0x00e0, 0x10, 1, 1),
+ PIN_FIELD_BASE(235, 235, 15, 0x00e0, 0x10, 1, 1),
+ PIN_FIELD_BASE(236, 236, 15, 0x00e0, 0x10, 1, 1),
+ PIN_FIELD_BASE(237, 237, 15, 0x00e0, 0x10, 2, 1),
+ PIN_FIELD_BASE(238, 238, 15, 0x00e0, 0x10, 2, 1),
+ PIN_FIELD_BASE(239, 239, 15, 0x00e0, 0x10, 2, 1),
+ PIN_FIELD_BASE(240, 240, 15, 0x00e0, 0x10, 2, 1),
+ PIN_FIELD_BASE(241, 241, 15, 0x00e0, 0x10, 3, 1),
+ PIN_FIELD_BASE(242, 242, 15, 0x00e0, 0x10, 3, 1),
+ PIN_FIELD_BASE(243, 243, 15, 0x00e0, 0x10, 3, 1),
+ PIN_FIELD_BASE(244, 244, 15, 0x00e0, 0x10, 3, 1),
+ PIN_FIELD_BASE(245, 245, 15, 0x00e0, 0x10, 4, 1),
+ PIN_FIELD_BASE(246, 246, 15, 0x00e0, 0x10, 5, 1),
+ PIN_FIELD_BASE(247, 247, 15, 0x00e0, 0x10, 5, 1),
+ PIN_FIELD_BASE(248, 248, 15, 0x00e0, 0x10, 4, 1),
+ PIN_FIELD_BASE(249, 249, 15, 0x00e0, 0x10, 4, 1),
+ PIN_FIELD_BASE(250, 250, 15, 0x00e0, 0x10, 4, 1),
+ PIN_FIELD_BASE(251, 251, 3, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(252, 252, 3, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(253, 253, 3, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(254, 254, 3, 0x00c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(255, 255, 3, 0x00c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(256, 256, 3, 0x00c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(257, 257, 3, 0x00c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(258, 258, 3, 0x00c0, 0x10, 6, 1),
+ PIN_FIELD_BASE(259, 259, 14, 0x0130, 0x10, 13, 1),
+ PIN_FIELD_BASE(260, 260, 14, 0x0130, 0x10, 14, 1),
+ PIN_FIELD_BASE(261, 261, 14, 0x0130, 0x10, 15, 1),
+ PIN_FIELD_BASE(262, 262, 14, 0x0130, 0x10, 16, 1),
+ PIN_FIELD_BASE(263, 263, 14, 0x0130, 0x10, 17, 1),
+ PIN_FIELD_BASE(264, 264, 14, 0x0130, 0x10, 18, 1),
+ PIN_FIELD_BASE(265, 265, 14, 0x0130, 0x10, 19, 1),
+ PIN_FIELD_BASE(266, 266, 14, 0x0130, 0x10, 20, 1),
+ PIN_FIELD_BASE(267, 267, 15, 0x00e0, 0x10, 8, 1),
+ PIN_FIELD_BASE(268, 268, 15, 0x00e0, 0x10, 9, 1),
+ PIN_FIELD_BASE(269, 269, 15, 0x00e0, 0x10, 6, 1),
+ PIN_FIELD_BASE(270, 270, 15, 0x00e0, 0x10, 7, 1),
+};
+
+static const struct mtk_pin_field_calc mt8196_pin_ies_range[] = {
+ PIN_FIELD_BASE(0, 0, 8, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(1, 1, 8, 0x0060, 0x10, 1, 1),
+ PIN_FIELD_BASE(2, 2, 11, 0x0040, 0x10, 1, 1),
+ PIN_FIELD_BASE(3, 3, 11, 0x0040, 0x10, 2, 1),
+ PIN_FIELD_BASE(4, 4, 11, 0x0040, 0x10, 3, 1),
+ PIN_FIELD_BASE(5, 5, 11, 0x0040, 0x10, 4, 1),
+ PIN_FIELD_BASE(6, 6, 11, 0x0040, 0x10, 5, 1),
+ PIN_FIELD_BASE(7, 7, 11, 0x0040, 0x10, 6, 1),
+ PIN_FIELD_BASE(8, 8, 11, 0x0040, 0x10, 7, 1),
+ PIN_FIELD_BASE(9, 9, 9, 0x0070, 0x10, 14, 1),
+ PIN_FIELD_BASE(10, 10, 9, 0x0070, 0x10, 12, 1),
+ PIN_FIELD_BASE(11, 11, 8, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(12, 12, 9, 0x0070, 0x10, 13, 1),
+ PIN_FIELD_BASE(13, 13, 6, 0x0060, 0x10, 1, 1),
+ PIN_FIELD_BASE(14, 14, 3, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(15, 15, 6, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(16, 16, 6, 0x0060, 0x10, 3, 1),
+ PIN_FIELD_BASE(17, 17, 6, 0x0060, 0x10, 4, 1),
+ PIN_FIELD_BASE(18, 18, 6, 0x0060, 0x10, 5, 1),
+ PIN_FIELD_BASE(19, 19, 6, 0x0060, 0x10, 6, 1),
+ PIN_FIELD_BASE(20, 20, 3, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(21, 21, 2, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(22, 22, 2, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(23, 23, 2, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(24, 24, 2, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(25, 25, 2, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(26, 26, 2, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(27, 27, 2, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(28, 28, 2, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(29, 29, 2, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(30, 30, 2, 0x0050, 0x10, 12, 1),
+ PIN_FIELD_BASE(31, 31, 2, 0x0050, 0x10, 13, 1),
+ PIN_FIELD_BASE(32, 32, 1, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(33, 33, 1, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(34, 34, 1, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(35, 35, 1, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(36, 36, 1, 0x0050, 0x10, 12, 1),
+ PIN_FIELD_BASE(37, 37, 1, 0x0050, 0x10, 13, 1),
+ PIN_FIELD_BASE(38, 38, 1, 0x0050, 0x10, 14, 1),
+ PIN_FIELD_BASE(39, 39, 8, 0x0060, 0x10, 6, 1),
+ PIN_FIELD_BASE(40, 40, 8, 0x0060, 0x10, 3, 1),
+ PIN_FIELD_BASE(41, 41, 8, 0x0060, 0x10, 5, 1),
+ PIN_FIELD_BASE(42, 42, 8, 0x0060, 0x10, 4, 1),
+ PIN_FIELD_BASE(43, 43, 8, 0x0060, 0x10, 7, 1),
+ PIN_FIELD_BASE(44, 44, 8, 0x0060, 0x10, 8, 1),
+ PIN_FIELD_BASE(45, 45, 8, 0x0060, 0x10, 9, 1),
+ PIN_FIELD_BASE(46, 46, 8, 0x0060, 0x10, 10, 1),
+ PIN_FIELD_BASE(47, 47, 8, 0x0060, 0x10, 13, 1),
+ PIN_FIELD_BASE(48, 48, 8, 0x0060, 0x10, 11, 1),
+ PIN_FIELD_BASE(49, 49, 8, 0x0060, 0x10, 14, 1),
+ PIN_FIELD_BASE(50, 50, 8, 0x0060, 0x10, 12, 1),
+ PIN_FIELD_BASE(51, 51, 8, 0x0060, 0x10, 15, 1),
+ PIN_FIELD_BASE(52, 52, 9, 0x0070, 0x10, 7, 1),
+ PIN_FIELD_BASE(53, 53, 9, 0x0070, 0x10, 8, 1),
+ PIN_FIELD_BASE(54, 54, 9, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(55, 55, 9, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(56, 56, 9, 0x0070, 0x10, 5, 1),
+ PIN_FIELD_BASE(57, 57, 9, 0x0070, 0x10, 6, 1),
+ PIN_FIELD_BASE(58, 58, 9, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(59, 59, 9, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(60, 60, 9, 0x0070, 0x10, 19, 1),
+ PIN_FIELD_BASE(61, 61, 9, 0x0070, 0x10, 10, 1),
+ PIN_FIELD_BASE(62, 62, 9, 0x0070, 0x10, 9, 1),
+ PIN_FIELD_BASE(63, 63, 9, 0x0070, 0x10, 18, 1),
+ PIN_FIELD_BASE(64, 64, 9, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(65, 65, 9, 0x0070, 0x10, 11, 1),
+ PIN_FIELD_BASE(66, 66, 9, 0x0070, 0x10, 24, 1),
+ PIN_FIELD_BASE(67, 67, 9, 0x0070, 0x10, 22, 1),
+ PIN_FIELD_BASE(68, 68, 9, 0x0070, 0x10, 21, 1),
+ PIN_FIELD_BASE(69, 69, 9, 0x0070, 0x10, 25, 1),
+ PIN_FIELD_BASE(70, 70, 9, 0x0070, 0x10, 16, 1),
+ PIN_FIELD_BASE(71, 71, 9, 0x0070, 0x10, 15, 1),
+ PIN_FIELD_BASE(72, 72, 9, 0x0070, 0x10, 23, 1),
+ PIN_FIELD_BASE(73, 73, 9, 0x0070, 0x10, 20, 1),
+ PIN_FIELD_BASE(74, 74, 9, 0x0070, 0x10, 17, 1),
+ PIN_FIELD_BASE(75, 75, 10, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(76, 76, 10, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(77, 77, 10, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(78, 78, 10, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(79, 79, 10, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(80, 80, 10, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(81, 81, 11, 0x0040, 0x10, 9, 1),
+ PIN_FIELD_BASE(82, 82, 11, 0x0040, 0x10, 10, 1),
+ PIN_FIELD_BASE(83, 83, 11, 0x0040, 0x10, 12, 1),
+ PIN_FIELD_BASE(84, 84, 11, 0x0040, 0x10, 11, 1),
+ PIN_FIELD_BASE(85, 85, 11, 0x0040, 0x10, 13, 1),
+ PIN_FIELD_BASE(86, 86, 11, 0x0040, 0x10, 14, 1),
+ PIN_FIELD_BASE(87, 87, 11, 0x0040, 0x10, 16, 1),
+ PIN_FIELD_BASE(88, 88, 11, 0x0040, 0x10, 15, 1),
+ PIN_FIELD_BASE(89, 89, 11, 0x0040, 0x10, 0, 1),
+ PIN_FIELD_BASE(90, 90, 11, 0x0040, 0x10, 8, 1),
+ PIN_FIELD_BASE(91, 91, 12, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(92, 92, 12, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(93, 93, 12, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(94, 94, 12, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(95, 95, 12, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(96, 96, 12, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(97, 97, 12, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(98, 98, 12, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(99, 99, 12, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(100, 100, 12, 0x0050, 0x10, 12, 1),
+ PIN_FIELD_BASE(101, 101, 12, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(102, 102, 12, 0x0050, 0x10, 13, 1),
+ PIN_FIELD_BASE(103, 103, 12, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(104, 104, 12, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(105, 105, 12, 0x0050, 0x10, 14, 1),
+ PIN_FIELD_BASE(106, 106, 5, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(107, 107, 5, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(108, 108, 5, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(109, 109, 5, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(110, 110, 5, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(111, 111, 5, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(112, 112, 5, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(113, 113, 5, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(114, 114, 5, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(115, 115, 5, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(116, 116, 5, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(117, 117, 5, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(118, 118, 6, 0x0060, 0x10, 9, 1),
+ PIN_FIELD_BASE(119, 119, 6, 0x0060, 0x10, 10, 1),
+ PIN_FIELD_BASE(120, 120, 6, 0x0060, 0x10, 12, 1),
+ PIN_FIELD_BASE(121, 121, 6, 0x0060, 0x10, 11, 1),
+ PIN_FIELD_BASE(122, 122, 6, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(123, 123, 6, 0x0060, 0x10, 7, 1),
+ PIN_FIELD_BASE(124, 124, 6, 0x0060, 0x10, 8, 1),
+ PIN_FIELD_BASE(125, 125, 7, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(126, 126, 7, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(127, 127, 7, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(128, 128, 7, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(129, 129, 7, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(130, 130, 7, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(131, 131, 7, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(132, 132, 7, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(133, 133, 7, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(134, 134, 7, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(135, 135, 7, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(136, 136, 7, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(137, 137, 4, 0x0040, 0x10, 10, 1),
+ PIN_FIELD_BASE(138, 138, 4, 0x0040, 0x10, 11, 1),
+ PIN_FIELD_BASE(139, 139, 4, 0x0040, 0x10, 12, 1),
+ PIN_FIELD_BASE(140, 140, 4, 0x0040, 0x10, 13, 1),
+ PIN_FIELD_BASE(141, 141, 4, 0x0040, 0x10, 14, 1),
+ PIN_FIELD_BASE(142, 142, 4, 0x0040, 0x10, 15, 1),
+ PIN_FIELD_BASE(143, 143, 4, 0x0040, 0x10, 16, 1),
+ PIN_FIELD_BASE(144, 144, 4, 0x0040, 0x10, 17, 1),
+ PIN_FIELD_BASE(145, 145, 4, 0x0040, 0x10, 0, 1),
+ PIN_FIELD_BASE(146, 146, 4, 0x0040, 0x10, 1, 1),
+ PIN_FIELD_BASE(147, 147, 4, 0x0040, 0x10, 2, 1),
+ PIN_FIELD_BASE(148, 148, 4, 0x0040, 0x10, 3, 1),
+ PIN_FIELD_BASE(149, 149, 4, 0x0040, 0x10, 4, 1),
+ PIN_FIELD_BASE(150, 150, 4, 0x0040, 0x10, 5, 1),
+ PIN_FIELD_BASE(151, 151, 4, 0x0040, 0x10, 6, 1),
+ PIN_FIELD_BASE(152, 152, 4, 0x0040, 0x10, 7, 1),
+ PIN_FIELD_BASE(153, 153, 4, 0x0040, 0x10, 9, 1),
+ PIN_FIELD_BASE(154, 154, 4, 0x0040, 0x10, 8, 1),
+ PIN_FIELD_BASE(155, 155, 4, 0x0040, 0x10, 18, 1),
+ PIN_FIELD_BASE(156, 156, 4, 0x0040, 0x10, 19, 1),
+ PIN_FIELD_BASE(157, 157, 2, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(158, 158, 2, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(159, 159, 2, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(160, 160, 3, 0x0050, 0x10, 22, 1),
+ PIN_FIELD_BASE(161, 161, 3, 0x0050, 0x10, 20, 1),
+ PIN_FIELD_BASE(162, 162, 3, 0x0050, 0x10, 23, 1),
+ PIN_FIELD_BASE(163, 163, 3, 0x0050, 0x10, 21, 1),
+ PIN_FIELD_BASE(164, 164, 3, 0x0050, 0x10, 12, 1),
+ PIN_FIELD_BASE(165, 165, 3, 0x0050, 0x10, 14, 1),
+ PIN_FIELD_BASE(166, 166, 3, 0x0050, 0x10, 13, 1),
+ PIN_FIELD_BASE(167, 167, 3, 0x0050, 0x10, 15, 1),
+ PIN_FIELD_BASE(168, 168, 3, 0x0050, 0x10, 16, 1),
+ PIN_FIELD_BASE(169, 169, 3, 0x0050, 0x10, 17, 1),
+ PIN_FIELD_BASE(170, 170, 3, 0x0050, 0x10, 19, 1),
+ PIN_FIELD_BASE(171, 171, 3, 0x0050, 0x10, 18, 1),
+ PIN_FIELD_BASE(172, 172, 3, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(173, 173, 3, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(174, 174, 1, 0x0050, 0x10, 15, 1),
+ PIN_FIELD_BASE(175, 175, 1, 0x0050, 0x10, 16, 1),
+ PIN_FIELD_BASE(176, 176, 1, 0x0050, 0x10, 17, 1),
+ PIN_FIELD_BASE(177, 177, 1, 0x0050, 0x10, 18, 1),
+ PIN_FIELD_BASE(178, 178, 1, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(179, 179, 1, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(180, 180, 1, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(181, 181, 1, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(182, 182, 1, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(183, 183, 1, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(184, 184, 1, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(185, 185, 1, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(186, 186, 13, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(187, 187, 13, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(188, 188, 13, 0x0090, 0x10, 12, 1),
+ PIN_FIELD_BASE(189, 189, 13, 0x0090, 0x10, 17, 1),
+ PIN_FIELD_BASE(190, 190, 13, 0x0090, 0x10, 13, 1),
+ PIN_FIELD_BASE(191, 191, 13, 0x0090, 0x10, 18, 1),
+ PIN_FIELD_BASE(192, 192, 13, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(193, 193, 13, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(194, 194, 13, 0x0090, 0x10, 14, 1),
+ PIN_FIELD_BASE(195, 195, 13, 0x0090, 0x10, 19, 1),
+ PIN_FIELD_BASE(196, 196, 13, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(197, 197, 13, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(198, 198, 13, 0x0090, 0x10, 15, 1),
+ PIN_FIELD_BASE(199, 199, 13, 0x0090, 0x10, 20, 1),
+ PIN_FIELD_BASE(200, 200, 13, 0x0090, 0x10, 22, 1),
+ PIN_FIELD_BASE(201, 201, 13, 0x0090, 0x10, 25, 1),
+ PIN_FIELD_BASE(202, 202, 13, 0x0090, 0x10, 16, 1),
+ PIN_FIELD_BASE(203, 203, 13, 0x0090, 0x10, 21, 1),
+ PIN_FIELD_BASE(204, 204, 13, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(205, 205, 13, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(206, 206, 13, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(207, 207, 13, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(208, 208, 13, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(209, 209, 13, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(210, 210, 14, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(211, 211, 14, 0x0060, 0x10, 1, 1),
+ PIN_FIELD_BASE(212, 212, 14, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(213, 213, 14, 0x0060, 0x10, 3, 1),
+ PIN_FIELD_BASE(214, 214, 13, 0x0090, 0x10, 23, 1),
+ PIN_FIELD_BASE(215, 215, 13, 0x0090, 0x10, 24, 1),
+ PIN_FIELD_BASE(216, 216, 14, 0x0060, 0x10, 13, 1),
+ PIN_FIELD_BASE(217, 217, 14, 0x0060, 0x10, 5, 1),
+ PIN_FIELD_BASE(218, 218, 14, 0x0060, 0x10, 6, 1),
+ PIN_FIELD_BASE(219, 219, 14, 0x0060, 0x10, 4, 1),
+ PIN_FIELD_BASE(220, 220, 14, 0x0060, 0x10, 22, 1),
+ PIN_FIELD_BASE(221, 221, 14, 0x0060, 0x10, 23, 1),
+ PIN_FIELD_BASE(222, 222, 14, 0x0060, 0x10, 25, 1),
+ PIN_FIELD_BASE(223, 223, 14, 0x0060, 0x10, 24, 1),
+ PIN_FIELD_BASE(224, 224, 14, 0x0060, 0x10, 7, 1),
+ PIN_FIELD_BASE(225, 225, 14, 0x0060, 0x10, 8, 1),
+ PIN_FIELD_BASE(226, 226, 14, 0x0060, 0x10, 9, 1),
+ PIN_FIELD_BASE(227, 227, 14, 0x0060, 0x10, 10, 1),
+ PIN_FIELD_BASE(228, 228, 14, 0x0060, 0x10, 11, 1),
+ PIN_FIELD_BASE(229, 229, 14, 0x0060, 0x10, 12, 1),
+ PIN_FIELD_BASE(230, 230, 15, 0x0040, 0x10, 13, 1),
+ PIN_FIELD_BASE(231, 231, 15, 0x0040, 0x10, 14, 1),
+ PIN_FIELD_BASE(232, 232, 15, 0x0040, 0x10, 10, 1),
+ PIN_FIELD_BASE(233, 233, 15, 0x0040, 0x10, 0, 1),
+ PIN_FIELD_BASE(234, 234, 15, 0x0040, 0x10, 3, 1),
+ PIN_FIELD_BASE(235, 235, 15, 0x0040, 0x10, 1, 1),
+ PIN_FIELD_BASE(236, 236, 15, 0x0040, 0x10, 2, 1),
+ PIN_FIELD_BASE(237, 237, 15, 0x0040, 0x10, 6, 1),
+ PIN_FIELD_BASE(238, 238, 15, 0x0040, 0x10, 5, 1),
+ PIN_FIELD_BASE(239, 239, 15, 0x0040, 0x10, 23, 1),
+ PIN_FIELD_BASE(240, 240, 15, 0x0040, 0x10, 22, 1),
+ PIN_FIELD_BASE(241, 241, 15, 0x0040, 0x10, 16, 1),
+ PIN_FIELD_BASE(242, 242, 15, 0x0040, 0x10, 17, 1),
+ PIN_FIELD_BASE(243, 243, 15, 0x0040, 0x10, 15, 1),
+ PIN_FIELD_BASE(244, 244, 15, 0x0040, 0x10, 12, 1),
+ PIN_FIELD_BASE(245, 245, 15, 0x0040, 0x10, 9, 1),
+ PIN_FIELD_BASE(246, 246, 15, 0x0040, 0x10, 8, 1),
+ PIN_FIELD_BASE(247, 247, 15, 0x0040, 0x10, 7, 1),
+ PIN_FIELD_BASE(248, 248, 15, 0x0040, 0x10, 4, 1),
+ PIN_FIELD_BASE(249, 249, 15, 0x0040, 0x10, 24, 1),
+ PIN_FIELD_BASE(250, 250, 15, 0x0040, 0x10, 11, 1),
+ PIN_FIELD_BASE(251, 251, 3, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(252, 252, 3, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(253, 253, 3, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(254, 254, 3, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(255, 255, 3, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(256, 256, 3, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(257, 257, 3, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(258, 258, 3, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(259, 259, 14, 0x0060, 0x10, 14, 1),
+ PIN_FIELD_BASE(260, 260, 14, 0x0060, 0x10, 15, 1),
+ PIN_FIELD_BASE(261, 261, 14, 0x0060, 0x10, 16, 1),
+ PIN_FIELD_BASE(262, 262, 14, 0x0060, 0x10, 17, 1),
+ PIN_FIELD_BASE(263, 263, 14, 0x0060, 0x10, 18, 1),
+ PIN_FIELD_BASE(264, 264, 14, 0x0060, 0x10, 19, 1),
+ PIN_FIELD_BASE(265, 265, 14, 0x0060, 0x10, 20, 1),
+ PIN_FIELD_BASE(266, 266, 14, 0x0060, 0x10, 21, 1),
+ PIN_FIELD_BASE(267, 267, 15, 0x0040, 0x10, 20, 1),
+ PIN_FIELD_BASE(268, 268, 15, 0x0040, 0x10, 21, 1),
+ PIN_FIELD_BASE(269, 269, 15, 0x0040, 0x10, 18, 1),
+ PIN_FIELD_BASE(270, 270, 15, 0x0040, 0x10, 19, 1),
+};
+
+static const struct mtk_pin_field_calc mt8196_pin_pupd_range[] = {
+ PIN_FIELD_BASE(60, 60, 9, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(125, 125, 7, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(126, 126, 7, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(127, 127, 7, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(128, 128, 7, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(129, 129, 7, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(130, 130, 7, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(131, 131, 7, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(132, 132, 7, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(133, 133, 7, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(134, 134, 7, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(135, 135, 7, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(136, 136, 7, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(137, 137, 4, 0x0070, 0x10, 10, 1),
+ PIN_FIELD_BASE(138, 138, 4, 0x0070, 0x10, 11, 1),
+ PIN_FIELD_BASE(139, 139, 4, 0x0070, 0x10, 12, 1),
+ PIN_FIELD_BASE(140, 140, 4, 0x0070, 0x10, 13, 1),
+ PIN_FIELD_BASE(141, 141, 4, 0x0070, 0x10, 14, 1),
+ PIN_FIELD_BASE(142, 142, 4, 0x0070, 0x10, 15, 1),
+ PIN_FIELD_BASE(143, 143, 4, 0x0070, 0x10, 16, 1),
+ PIN_FIELD_BASE(144, 144, 4, 0x0070, 0x10, 17, 1),
+ PIN_FIELD_BASE(145, 145, 4, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(146, 146, 4, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(147, 147, 4, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(148, 148, 4, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(149, 149, 4, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(150, 150, 4, 0x0070, 0x10, 5, 1),
+ PIN_FIELD_BASE(151, 151, 4, 0x0070, 0x10, 6, 1),
+ PIN_FIELD_BASE(152, 152, 4, 0x0070, 0x10, 7, 1),
+ PIN_FIELD_BASE(153, 153, 4, 0x0070, 0x10, 9, 1),
+ PIN_FIELD_BASE(154, 154, 4, 0x0070, 0x10, 8, 1),
+ PIN_FIELD_BASE(155, 155, 4, 0x0070, 0x10, 18, 1),
+ PIN_FIELD_BASE(156, 156, 4, 0x0070, 0x10, 19, 1),
+ PIN_FIELD_BASE(217, 217, 14, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(218, 218, 14, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(219, 219, 14, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(224, 224, 14, 0x00a0, 0x10, 3, 1),
+ PIN_FIELD_BASE(225, 225, 14, 0x00a0, 0x10, 4, 1),
+ PIN_FIELD_BASE(226, 226, 14, 0x00a0, 0x10, 5, 1),
+ PIN_FIELD_BASE(227, 227, 14, 0x00a0, 0x10, 6, 1),
+ PIN_FIELD_BASE(228, 228, 14, 0x00a0, 0x10, 7, 1),
+ PIN_FIELD_BASE(229, 229, 14, 0x00a0, 0x10, 8, 1),
+ PIN_FIELD_BASE(259, 259, 14, 0x00a0, 0x10, 9, 1),
+ PIN_FIELD_BASE(260, 260, 14, 0x00a0, 0x10, 10, 1),
+ PIN_FIELD_BASE(261, 261, 14, 0x00a0, 0x10, 11, 1),
+ PIN_FIELD_BASE(262, 262, 14, 0x00a0, 0x10, 12, 1),
+ PIN_FIELD_BASE(263, 263, 14, 0x00a0, 0x10, 13, 1),
+ PIN_FIELD_BASE(264, 264, 14, 0x00a0, 0x10, 14, 1),
+ PIN_FIELD_BASE(265, 265, 14, 0x00a0, 0x10, 15, 1),
+ PIN_FIELD_BASE(266, 266, 14, 0x00a0, 0x10, 16, 1),
+ PIN_FIELD_BASE(267, 267, 15, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(268, 268, 15, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(269, 269, 15, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(270, 270, 15, 0x0080, 0x10, 1, 1),
+};
+
+static const struct mtk_pin_field_calc mt8196_pin_r0_range[] = {
+ PIN_FIELD_BASE(60, 60, 9, 0x00d0, 0x10, 0, 1),
+ PIN_FIELD_BASE(125, 125, 7, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(126, 126, 7, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(127, 127, 7, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(128, 128, 7, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(129, 129, 7, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(130, 130, 7, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(131, 131, 7, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(132, 132, 7, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(133, 133, 7, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(134, 134, 7, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(135, 135, 7, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(136, 136, 7, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(137, 137, 4, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(138, 138, 4, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(139, 139, 4, 0x0080, 0x10, 12, 1),
+ PIN_FIELD_BASE(140, 140, 4, 0x0080, 0x10, 13, 1),
+ PIN_FIELD_BASE(141, 141, 4, 0x0080, 0x10, 14, 1),
+ PIN_FIELD_BASE(142, 142, 4, 0x0080, 0x10, 15, 1),
+ PIN_FIELD_BASE(143, 143, 4, 0x0080, 0x10, 16, 1),
+ PIN_FIELD_BASE(144, 144, 4, 0x0080, 0x10, 17, 1),
+ PIN_FIELD_BASE(145, 145, 4, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(146, 146, 4, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(147, 147, 4, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(148, 148, 4, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(149, 149, 4, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(150, 150, 4, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(151, 151, 4, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(152, 152, 4, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(153, 153, 4, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(154, 154, 4, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(155, 155, 4, 0x0080, 0x10, 18, 1),
+ PIN_FIELD_BASE(156, 156, 4, 0x0080, 0x10, 19, 1),
+ PIN_FIELD_BASE(217, 217, 14, 0x00c0, 0x10, 1, 1),
+ PIN_FIELD_BASE(218, 218, 14, 0x00c0, 0x10, 2, 1),
+ PIN_FIELD_BASE(219, 219, 14, 0x00c0, 0x10, 0, 1),
+ PIN_FIELD_BASE(224, 224, 14, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(225, 225, 14, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(226, 226, 14, 0x00c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(227, 227, 14, 0x00c0, 0x10, 6, 1),
+ PIN_FIELD_BASE(228, 228, 14, 0x00c0, 0x10, 7, 1),
+ PIN_FIELD_BASE(229, 229, 14, 0x00c0, 0x10, 8, 1),
+ PIN_FIELD_BASE(259, 259, 14, 0x00c0, 0x10, 9, 1),
+ PIN_FIELD_BASE(260, 260, 14, 0x00c0, 0x10, 10, 1),
+ PIN_FIELD_BASE(261, 261, 14, 0x00c0, 0x10, 11, 1),
+ PIN_FIELD_BASE(262, 262, 14, 0x00c0, 0x10, 12, 1),
+ PIN_FIELD_BASE(263, 263, 14, 0x00c0, 0x10, 13, 1),
+ PIN_FIELD_BASE(264, 264, 14, 0x00c0, 0x10, 14, 1),
+ PIN_FIELD_BASE(265, 265, 14, 0x00c0, 0x10, 15, 1),
+ PIN_FIELD_BASE(266, 266, 14, 0x00c0, 0x10, 16, 1),
+ PIN_FIELD_BASE(267, 267, 15, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(268, 268, 15, 0x00a0, 0x10, 3, 1),
+ PIN_FIELD_BASE(269, 269, 15, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(270, 270, 15, 0x00a0, 0x10, 1, 1),
+};
+
+static const struct mtk_pin_field_calc mt8196_pin_r1_range[] = {
+ PIN_FIELD_BASE(60, 60, 9, 0x00e0, 0x10, 0, 1),
+ PIN_FIELD_BASE(125, 125, 7, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(126, 126, 7, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(127, 127, 7, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(128, 128, 7, 0x00a0, 0x10, 3, 1),
+ PIN_FIELD_BASE(129, 129, 7, 0x00a0, 0x10, 4, 1),
+ PIN_FIELD_BASE(130, 130, 7, 0x00a0, 0x10, 5, 1),
+ PIN_FIELD_BASE(131, 131, 7, 0x00a0, 0x10, 9, 1),
+ PIN_FIELD_BASE(132, 132, 7, 0x00a0, 0x10, 11, 1),
+ PIN_FIELD_BASE(133, 133, 7, 0x00a0, 0x10, 10, 1),
+ PIN_FIELD_BASE(134, 134, 7, 0x00a0, 0x10, 6, 1),
+ PIN_FIELD_BASE(135, 135, 7, 0x00a0, 0x10, 8, 1),
+ PIN_FIELD_BASE(136, 136, 7, 0x00a0, 0x10, 7, 1),
+ PIN_FIELD_BASE(137, 137, 4, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(138, 138, 4, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(139, 139, 4, 0x0090, 0x10, 12, 1),
+ PIN_FIELD_BASE(140, 140, 4, 0x0090, 0x10, 13, 1),
+ PIN_FIELD_BASE(141, 141, 4, 0x0090, 0x10, 14, 1),
+ PIN_FIELD_BASE(142, 142, 4, 0x0090, 0x10, 15, 1),
+ PIN_FIELD_BASE(143, 143, 4, 0x0090, 0x10, 16, 1),
+ PIN_FIELD_BASE(144, 144, 4, 0x0090, 0x10, 17, 1),
+ PIN_FIELD_BASE(145, 145, 4, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(146, 146, 4, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(147, 147, 4, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(148, 148, 4, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(149, 149, 4, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(150, 150, 4, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(151, 151, 4, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(152, 152, 4, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(153, 153, 4, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(154, 154, 4, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(155, 155, 4, 0x0090, 0x10, 18, 1),
+ PIN_FIELD_BASE(156, 156, 4, 0x0090, 0x10, 19, 1),
+ PIN_FIELD_BASE(217, 217, 14, 0x00d0, 0x10, 1, 1),
+ PIN_FIELD_BASE(218, 218, 14, 0x00d0, 0x10, 2, 1),
+ PIN_FIELD_BASE(219, 219, 14, 0x00d0, 0x10, 0, 1),
+ PIN_FIELD_BASE(224, 224, 14, 0x00d0, 0x10, 3, 1),
+ PIN_FIELD_BASE(225, 225, 14, 0x00d0, 0x10, 4, 1),
+ PIN_FIELD_BASE(226, 226, 14, 0x00d0, 0x10, 5, 1),
+ PIN_FIELD_BASE(227, 227, 14, 0x00d0, 0x10, 6, 1),
+ PIN_FIELD_BASE(228, 228, 14, 0x00d0, 0x10, 7, 1),
+ PIN_FIELD_BASE(229, 229, 14, 0x00d0, 0x10, 8, 1),
+ PIN_FIELD_BASE(259, 259, 14, 0x00d0, 0x10, 9, 1),
+ PIN_FIELD_BASE(260, 260, 14, 0x00d0, 0x10, 10, 1),
+ PIN_FIELD_BASE(261, 261, 14, 0x00d0, 0x10, 11, 1),
+ PIN_FIELD_BASE(262, 262, 14, 0x00d0, 0x10, 12, 1),
+ PIN_FIELD_BASE(263, 263, 14, 0x00d0, 0x10, 13, 1),
+ PIN_FIELD_BASE(264, 264, 14, 0x00d0, 0x10, 14, 1),
+ PIN_FIELD_BASE(265, 265, 14, 0x00d0, 0x10, 15, 1),
+ PIN_FIELD_BASE(266, 266, 14, 0x00d0, 0x10, 16, 1),
+ PIN_FIELD_BASE(267, 267, 15, 0x00b0, 0x10, 2, 1),
+ PIN_FIELD_BASE(268, 268, 15, 0x00b0, 0x10, 3, 1),
+ PIN_FIELD_BASE(269, 269, 15, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(270, 270, 15, 0x00b0, 0x10, 1, 1),
+};
+
+static const struct mtk_pin_field_calc mt8196_pin_pu_range[] = {
+ PIN_FIELD_BASE(0, 0, 8, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(1, 1, 8, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(2, 2, 11, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(3, 3, 11, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(4, 4, 11, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(5, 5, 11, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(6, 6, 11, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(7, 7, 11, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(8, 8, 11, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(9, 9, 9, 0x00c0, 0x10, 14, 1),
+ PIN_FIELD_BASE(10, 10, 9, 0x00c0, 0x10, 12, 1),
+ PIN_FIELD_BASE(11, 11, 8, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(12, 12, 9, 0x00c0, 0x10, 13, 1),
+ PIN_FIELD_BASE(13, 13, 6, 0x00b0, 0x10, 1, 1),
+ PIN_FIELD_BASE(14, 14, 3, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(15, 15, 6, 0x00b0, 0x10, 2, 1),
+ PIN_FIELD_BASE(16, 16, 6, 0x00b0, 0x10, 3, 1),
+ PIN_FIELD_BASE(17, 17, 6, 0x00b0, 0x10, 4, 1),
+ PIN_FIELD_BASE(18, 18, 6, 0x00b0, 0x10, 5, 1),
+ PIN_FIELD_BASE(19, 19, 6, 0x00b0, 0x10, 6, 1),
+ PIN_FIELD_BASE(20, 20, 3, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(21, 21, 2, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(22, 22, 2, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(23, 23, 2, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(24, 24, 2, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(25, 25, 2, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(26, 26, 2, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(27, 27, 2, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(28, 28, 2, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(29, 29, 2, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(30, 30, 2, 0x0090, 0x10, 12, 1),
+ PIN_FIELD_BASE(31, 31, 2, 0x0090, 0x10, 13, 1),
+ PIN_FIELD_BASE(32, 32, 1, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(33, 33, 1, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(34, 34, 1, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(35, 35, 1, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(36, 36, 1, 0x0090, 0x10, 12, 1),
+ PIN_FIELD_BASE(37, 37, 1, 0x0090, 0x10, 13, 1),
+ PIN_FIELD_BASE(38, 38, 1, 0x0090, 0x10, 14, 1),
+ PIN_FIELD_BASE(39, 39, 8, 0x00a0, 0x10, 6, 1),
+ PIN_FIELD_BASE(40, 40, 8, 0x00a0, 0x10, 3, 1),
+ PIN_FIELD_BASE(41, 41, 8, 0x00a0, 0x10, 5, 1),
+ PIN_FIELD_BASE(42, 42, 8, 0x00a0, 0x10, 4, 1),
+ PIN_FIELD_BASE(43, 43, 8, 0x00a0, 0x10, 7, 1),
+ PIN_FIELD_BASE(44, 44, 8, 0x00a0, 0x10, 8, 1),
+ PIN_FIELD_BASE(45, 45, 8, 0x00a0, 0x10, 9, 1),
+ PIN_FIELD_BASE(46, 46, 8, 0x00a0, 0x10, 10, 1),
+ PIN_FIELD_BASE(47, 47, 8, 0x00a0, 0x10, 13, 1),
+ PIN_FIELD_BASE(48, 48, 8, 0x00a0, 0x10, 11, 1),
+ PIN_FIELD_BASE(49, 49, 8, 0x00a0, 0x10, 14, 1),
+ PIN_FIELD_BASE(50, 50, 8, 0x00a0, 0x10, 12, 1),
+ PIN_FIELD_BASE(51, 51, 8, 0x00a0, 0x10, 15, 1),
+ PIN_FIELD_BASE(52, 52, 9, 0x00c0, 0x10, 7, 1),
+ PIN_FIELD_BASE(53, 53, 9, 0x00c0, 0x10, 8, 1),
+ PIN_FIELD_BASE(54, 54, 9, 0x00c0, 0x10, 2, 1),
+ PIN_FIELD_BASE(55, 55, 9, 0x00c0, 0x10, 1, 1),
+ PIN_FIELD_BASE(56, 56, 9, 0x00c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(57, 57, 9, 0x00c0, 0x10, 6, 1),
+ PIN_FIELD_BASE(58, 58, 9, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(59, 59, 9, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(61, 61, 9, 0x00c0, 0x10, 10, 1),
+ PIN_FIELD_BASE(62, 62, 9, 0x00c0, 0x10, 9, 1),
+ PIN_FIELD_BASE(63, 63, 9, 0x00c0, 0x10, 18, 1),
+ PIN_FIELD_BASE(64, 64, 9, 0x00c0, 0x10, 0, 1),
+ PIN_FIELD_BASE(65, 65, 9, 0x00c0, 0x10, 11, 1),
+ PIN_FIELD_BASE(66, 66, 9, 0x00c0, 0x10, 24, 1),
+ PIN_FIELD_BASE(67, 67, 9, 0x00c0, 0x10, 21, 1),
+ PIN_FIELD_BASE(68, 68, 9, 0x00c0, 0x10, 20, 1),
+ PIN_FIELD_BASE(69, 69, 9, 0x00c0, 0x10, 25, 1),
+ PIN_FIELD_BASE(70, 70, 9, 0x00c0, 0x10, 16, 1),
+ PIN_FIELD_BASE(71, 71, 9, 0x00c0, 0x10, 15, 1),
+ PIN_FIELD_BASE(72, 72, 9, 0x00c0, 0x10, 23, 1),
+ PIN_FIELD_BASE(73, 73, 9, 0x00c0, 0x10, 19, 1),
+ PIN_FIELD_BASE(74, 74, 9, 0x00c0, 0x10, 17, 1),
+ PIN_FIELD_BASE(75, 75, 10, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(76, 76, 10, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(77, 77, 10, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(78, 78, 10, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(79, 79, 10, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(80, 80, 10, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(81, 81, 11, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(82, 82, 11, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(83, 83, 11, 0x0080, 0x10, 12, 1),
+ PIN_FIELD_BASE(84, 84, 11, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(85, 85, 11, 0x0080, 0x10, 13, 1),
+ PIN_FIELD_BASE(86, 86, 11, 0x0080, 0x10, 14, 1),
+ PIN_FIELD_BASE(87, 87, 11, 0x0080, 0x10, 16, 1),
+ PIN_FIELD_BASE(88, 88, 11, 0x0080, 0x10, 15, 1),
+ PIN_FIELD_BASE(89, 89, 11, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(90, 90, 11, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(91, 91, 12, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(92, 92, 12, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(93, 93, 12, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(94, 94, 12, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(95, 95, 12, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(96, 96, 12, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(97, 97, 12, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(98, 98, 12, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(99, 99, 12, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(100, 100, 12, 0x0090, 0x10, 12, 1),
+ PIN_FIELD_BASE(101, 101, 12, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(102, 102, 12, 0x0090, 0x10, 13, 1),
+ PIN_FIELD_BASE(103, 103, 12, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(104, 104, 12, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(105, 105, 12, 0x0090, 0x10, 14, 1),
+ PIN_FIELD_BASE(106, 106, 5, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(107, 107, 5, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(108, 108, 5, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(109, 109, 5, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(110, 110, 5, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(111, 111, 5, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(112, 112, 5, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(113, 113, 5, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(114, 114, 5, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(115, 115, 5, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(116, 116, 5, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(117, 117, 5, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(118, 118, 6, 0x00b0, 0x10, 9, 1),
+ PIN_FIELD_BASE(119, 119, 6, 0x00b0, 0x10, 10, 1),
+ PIN_FIELD_BASE(120, 120, 6, 0x00b0, 0x10, 12, 1),
+ PIN_FIELD_BASE(121, 121, 6, 0x00b0, 0x10, 11, 1),
+ PIN_FIELD_BASE(122, 122, 6, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(123, 123, 6, 0x00b0, 0x10, 7, 1),
+ PIN_FIELD_BASE(124, 124, 6, 0x00b0, 0x10, 8, 1),
+ PIN_FIELD_BASE(157, 157, 2, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(158, 158, 2, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(159, 159, 2, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(160, 160, 3, 0x0090, 0x10, 22, 1),
+ PIN_FIELD_BASE(161, 161, 3, 0x0090, 0x10, 20, 1),
+ PIN_FIELD_BASE(162, 162, 3, 0x0090, 0x10, 23, 1),
+ PIN_FIELD_BASE(163, 163, 3, 0x0090, 0x10, 21, 1),
+ PIN_FIELD_BASE(164, 164, 3, 0x0090, 0x10, 12, 1),
+ PIN_FIELD_BASE(165, 165, 3, 0x0090, 0x10, 14, 1),
+ PIN_FIELD_BASE(166, 166, 3, 0x0090, 0x10, 13, 1),
+ PIN_FIELD_BASE(167, 167, 3, 0x0090, 0x10, 15, 1),
+ PIN_FIELD_BASE(168, 168, 3, 0x0090, 0x10, 16, 1),
+ PIN_FIELD_BASE(169, 169, 3, 0x0090, 0x10, 17, 1),
+ PIN_FIELD_BASE(170, 170, 3, 0x0090, 0x10, 19, 1),
+ PIN_FIELD_BASE(171, 171, 3, 0x0090, 0x10, 18, 1),
+ PIN_FIELD_BASE(172, 172, 3, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(173, 173, 3, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(174, 174, 1, 0x0090, 0x10, 15, 1),
+ PIN_FIELD_BASE(175, 175, 1, 0x0090, 0x10, 16, 1),
+ PIN_FIELD_BASE(176, 176, 1, 0x0090, 0x10, 17, 1),
+ PIN_FIELD_BASE(177, 177, 1, 0x0090, 0x10, 18, 1),
+ PIN_FIELD_BASE(178, 178, 1, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(179, 179, 1, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(180, 180, 1, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(181, 181, 1, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(182, 182, 1, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(183, 183, 1, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(184, 184, 1, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(185, 185, 1, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(186, 186, 13, 0x00d0, 0x10, 4, 1),
+ PIN_FIELD_BASE(187, 187, 13, 0x00d0, 0x10, 5, 1),
+ PIN_FIELD_BASE(188, 188, 13, 0x00d0, 0x10, 12, 1),
+ PIN_FIELD_BASE(189, 189, 13, 0x00d0, 0x10, 17, 1),
+ PIN_FIELD_BASE(190, 190, 13, 0x00d0, 0x10, 13, 1),
+ PIN_FIELD_BASE(191, 191, 13, 0x00d0, 0x10, 18, 1),
+ PIN_FIELD_BASE(192, 192, 13, 0x00d0, 0x10, 0, 1),
+ PIN_FIELD_BASE(193, 193, 13, 0x00d0, 0x10, 6, 1),
+ PIN_FIELD_BASE(194, 194, 13, 0x00d0, 0x10, 14, 1),
+ PIN_FIELD_BASE(195, 195, 13, 0x00d0, 0x10, 19, 1),
+ PIN_FIELD_BASE(196, 196, 13, 0x00d0, 0x10, 1, 1),
+ PIN_FIELD_BASE(197, 197, 13, 0x00d0, 0x10, 7, 1),
+ PIN_FIELD_BASE(198, 198, 13, 0x00d0, 0x10, 15, 1),
+ PIN_FIELD_BASE(199, 199, 13, 0x00d0, 0x10, 20, 1),
+ PIN_FIELD_BASE(200, 200, 13, 0x00d0, 0x10, 22, 1),
+ PIN_FIELD_BASE(201, 201, 13, 0x00d0, 0x10, 25, 1),
+ PIN_FIELD_BASE(202, 202, 13, 0x00d0, 0x10, 16, 1),
+ PIN_FIELD_BASE(203, 203, 13, 0x00d0, 0x10, 21, 1),
+ PIN_FIELD_BASE(204, 204, 13, 0x00d0, 0x10, 2, 1),
+ PIN_FIELD_BASE(205, 205, 13, 0x00d0, 0x10, 3, 1),
+ PIN_FIELD_BASE(206, 206, 13, 0x00d0, 0x10, 8, 1),
+ PIN_FIELD_BASE(207, 207, 13, 0x00d0, 0x10, 9, 1),
+ PIN_FIELD_BASE(208, 208, 13, 0x00d0, 0x10, 10, 1),
+ PIN_FIELD_BASE(209, 209, 13, 0x00d0, 0x10, 11, 1),
+ PIN_FIELD_BASE(210, 210, 14, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(211, 211, 14, 0x00b0, 0x10, 1, 1),
+ PIN_FIELD_BASE(212, 212, 14, 0x00b0, 0x10, 2, 1),
+ PIN_FIELD_BASE(213, 213, 14, 0x00b0, 0x10, 3, 1),
+ PIN_FIELD_BASE(214, 214, 13, 0x00d0, 0x10, 23, 1),
+ PIN_FIELD_BASE(215, 215, 13, 0x00d0, 0x10, 24, 1),
+ PIN_FIELD_BASE(216, 216, 14, 0x00b0, 0x10, 4, 1),
+ PIN_FIELD_BASE(220, 220, 14, 0x00b0, 0x10, 5, 1),
+ PIN_FIELD_BASE(221, 221, 14, 0x00b0, 0x10, 6, 1),
+ PIN_FIELD_BASE(222, 222, 14, 0x00b0, 0x10, 8, 1),
+ PIN_FIELD_BASE(223, 223, 14, 0x00b0, 0x10, 7, 1),
+ PIN_FIELD_BASE(230, 230, 15, 0x0090, 0x10, 13, 1),
+ PIN_FIELD_BASE(231, 231, 15, 0x0090, 0x10, 14, 1),
+ PIN_FIELD_BASE(232, 232, 15, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(233, 233, 15, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(234, 234, 15, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(235, 235, 15, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(236, 236, 15, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(237, 237, 15, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(238, 238, 15, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(239, 239, 15, 0x0090, 0x10, 19, 1),
+ PIN_FIELD_BASE(240, 240, 15, 0x0090, 0x10, 18, 1),
+ PIN_FIELD_BASE(241, 241, 15, 0x0090, 0x10, 16, 1),
+ PIN_FIELD_BASE(242, 242, 15, 0x0090, 0x10, 17, 1),
+ PIN_FIELD_BASE(243, 243, 15, 0x0090, 0x10, 15, 1),
+ PIN_FIELD_BASE(244, 244, 15, 0x0090, 0x10, 12, 1),
+ PIN_FIELD_BASE(245, 245, 15, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(246, 246, 15, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(247, 247, 15, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(248, 248, 15, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(249, 249, 15, 0x0090, 0x10, 20, 1),
+ PIN_FIELD_BASE(250, 250, 15, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(251, 251, 3, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(252, 252, 3, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(253, 253, 3, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(254, 254, 3, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(255, 255, 3, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(256, 256, 3, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(257, 257, 3, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(258, 258, 3, 0x0090, 0x10, 9, 1),
+};
+
+static const struct mtk_pin_field_calc mt8196_pin_pd_range[] = {
+ PIN_FIELD_BASE(0, 0, 8, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(1, 1, 8, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(2, 2, 11, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(3, 3, 11, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(4, 4, 11, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(5, 5, 11, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(6, 6, 11, 0x0070, 0x10, 5, 1),
+ PIN_FIELD_BASE(7, 7, 11, 0x0070, 0x10, 6, 1),
+ PIN_FIELD_BASE(8, 8, 11, 0x0070, 0x10, 7, 1),
+ PIN_FIELD_BASE(9, 9, 9, 0x00a0, 0x10, 14, 1),
+ PIN_FIELD_BASE(10, 10, 9, 0x00a0, 0x10, 12, 1),
+ PIN_FIELD_BASE(11, 11, 8, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(12, 12, 9, 0x00a0, 0x10, 13, 1),
+ PIN_FIELD_BASE(13, 13, 6, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(14, 14, 3, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(15, 15, 6, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(16, 16, 6, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(17, 17, 6, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(18, 18, 6, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(19, 19, 6, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(20, 20, 3, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(21, 21, 2, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(22, 22, 2, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(23, 23, 2, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(24, 24, 2, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(25, 25, 2, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(26, 26, 2, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(27, 27, 2, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(28, 28, 2, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(29, 29, 2, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(30, 30, 2, 0x0080, 0x10, 12, 1),
+ PIN_FIELD_BASE(31, 31, 2, 0x0080, 0x10, 13, 1),
+ PIN_FIELD_BASE(32, 32, 1, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(33, 33, 1, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(34, 34, 1, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(35, 35, 1, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(36, 36, 1, 0x0080, 0x10, 12, 1),
+ PIN_FIELD_BASE(37, 37, 1, 0x0080, 0x10, 13, 1),
+ PIN_FIELD_BASE(38, 38, 1, 0x0080, 0x10, 14, 1),
+ PIN_FIELD_BASE(39, 39, 8, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(40, 40, 8, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(41, 41, 8, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(42, 42, 8, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(43, 43, 8, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(44, 44, 8, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(45, 45, 8, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(46, 46, 8, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(47, 47, 8, 0x0090, 0x10, 13, 1),
+ PIN_FIELD_BASE(48, 48, 8, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(49, 49, 8, 0x0090, 0x10, 14, 1),
+ PIN_FIELD_BASE(50, 50, 8, 0x0090, 0x10, 12, 1),
+ PIN_FIELD_BASE(51, 51, 8, 0x0090, 0x10, 15, 1),
+ PIN_FIELD_BASE(52, 52, 9, 0x00a0, 0x10, 7, 1),
+ PIN_FIELD_BASE(53, 53, 9, 0x00a0, 0x10, 8, 1),
+ PIN_FIELD_BASE(54, 54, 9, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(55, 55, 9, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(56, 56, 9, 0x00a0, 0x10, 5, 1),
+ PIN_FIELD_BASE(57, 57, 9, 0x00a0, 0x10, 6, 1),
+ PIN_FIELD_BASE(58, 58, 9, 0x00a0, 0x10, 3, 1),
+ PIN_FIELD_BASE(59, 59, 9, 0x00a0, 0x10, 4, 1),
+ PIN_FIELD_BASE(61, 61, 9, 0x00a0, 0x10, 10, 1),
+ PIN_FIELD_BASE(62, 62, 9, 0x00a0, 0x10, 9, 1),
+ PIN_FIELD_BASE(63, 63, 9, 0x00a0, 0x10, 18, 1),
+ PIN_FIELD_BASE(64, 64, 9, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(65, 65, 9, 0x00a0, 0x10, 11, 1),
+ PIN_FIELD_BASE(66, 66, 9, 0x00a0, 0x10, 24, 1),
+ PIN_FIELD_BASE(67, 67, 9, 0x00a0, 0x10, 21, 1),
+ PIN_FIELD_BASE(68, 68, 9, 0x00a0, 0x10, 20, 1),
+ PIN_FIELD_BASE(69, 69, 9, 0x00a0, 0x10, 25, 1),
+ PIN_FIELD_BASE(70, 70, 9, 0x00a0, 0x10, 16, 1),
+ PIN_FIELD_BASE(71, 71, 9, 0x00a0, 0x10, 15, 1),
+ PIN_FIELD_BASE(72, 72, 9, 0x00a0, 0x10, 23, 1),
+ PIN_FIELD_BASE(73, 73, 9, 0x00a0, 0x10, 19, 1),
+ PIN_FIELD_BASE(74, 74, 9, 0x00a0, 0x10, 17, 1),
+ PIN_FIELD_BASE(75, 75, 10, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(76, 76, 10, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(77, 77, 10, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(78, 78, 10, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(79, 79, 10, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(80, 80, 10, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(81, 81, 11, 0x0070, 0x10, 9, 1),
+ PIN_FIELD_BASE(82, 82, 11, 0x0070, 0x10, 10, 1),
+ PIN_FIELD_BASE(83, 83, 11, 0x0070, 0x10, 12, 1),
+ PIN_FIELD_BASE(84, 84, 11, 0x0070, 0x10, 11, 1),
+ PIN_FIELD_BASE(85, 85, 11, 0x0070, 0x10, 13, 1),
+ PIN_FIELD_BASE(86, 86, 11, 0x0070, 0x10, 14, 1),
+ PIN_FIELD_BASE(87, 87, 11, 0x0070, 0x10, 16, 1),
+ PIN_FIELD_BASE(88, 88, 11, 0x0070, 0x10, 15, 1),
+ PIN_FIELD_BASE(89, 89, 11, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(90, 90, 11, 0x0070, 0x10, 8, 1),
+ PIN_FIELD_BASE(91, 91, 12, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(92, 92, 12, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(93, 93, 12, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(94, 94, 12, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(95, 95, 12, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(96, 96, 12, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(97, 97, 12, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(98, 98, 12, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(99, 99, 12, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(100, 100, 12, 0x0080, 0x10, 12, 1),
+ PIN_FIELD_BASE(101, 101, 12, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(102, 102, 12, 0x0080, 0x10, 13, 1),
+ PIN_FIELD_BASE(103, 103, 12, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(104, 104, 12, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(105, 105, 12, 0x0080, 0x10, 14, 1),
+ PIN_FIELD_BASE(106, 106, 5, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(107, 107, 5, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(108, 108, 5, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(109, 109, 5, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(110, 110, 5, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(111, 111, 5, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(112, 112, 5, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(113, 113, 5, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(114, 114, 5, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(115, 115, 5, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(116, 116, 5, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(117, 117, 5, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(118, 118, 6, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(119, 119, 6, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(120, 120, 6, 0x0090, 0x10, 12, 1),
+ PIN_FIELD_BASE(121, 121, 6, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(122, 122, 6, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(123, 123, 6, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(124, 124, 6, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(157, 157, 2, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(158, 158, 2, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(159, 159, 2, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(160, 160, 3, 0x0080, 0x10, 22, 1),
+ PIN_FIELD_BASE(161, 161, 3, 0x0080, 0x10, 20, 1),
+ PIN_FIELD_BASE(162, 162, 3, 0x0080, 0x10, 23, 1),
+ PIN_FIELD_BASE(163, 163, 3, 0x0080, 0x10, 21, 1),
+ PIN_FIELD_BASE(164, 164, 3, 0x0080, 0x10, 12, 1),
+ PIN_FIELD_BASE(165, 165, 3, 0x0080, 0x10, 14, 1),
+ PIN_FIELD_BASE(166, 166, 3, 0x0080, 0x10, 13, 1),
+ PIN_FIELD_BASE(167, 167, 3, 0x0080, 0x10, 15, 1),
+ PIN_FIELD_BASE(168, 168, 3, 0x0080, 0x10, 16, 1),
+ PIN_FIELD_BASE(169, 169, 3, 0x0080, 0x10, 17, 1),
+ PIN_FIELD_BASE(170, 170, 3, 0x0080, 0x10, 19, 1),
+ PIN_FIELD_BASE(171, 171, 3, 0x0080, 0x10, 18, 1),
+ PIN_FIELD_BASE(172, 172, 3, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(173, 173, 3, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(174, 174, 1, 0x0080, 0x10, 15, 1),
+ PIN_FIELD_BASE(175, 175, 1, 0x0080, 0x10, 16, 1),
+ PIN_FIELD_BASE(176, 176, 1, 0x0080, 0x10, 17, 1),
+ PIN_FIELD_BASE(177, 177, 1, 0x0080, 0x10, 18, 1),
+ PIN_FIELD_BASE(178, 178, 1, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(179, 179, 1, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(180, 180, 1, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(181, 181, 1, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(182, 182, 1, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(183, 183, 1, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(184, 184, 1, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(185, 185, 1, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(186, 186, 13, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(187, 187, 13, 0x00c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(188, 188, 13, 0x00c0, 0x10, 12, 1),
+ PIN_FIELD_BASE(189, 189, 13, 0x00c0, 0x10, 17, 1),
+ PIN_FIELD_BASE(190, 190, 13, 0x00c0, 0x10, 13, 1),
+ PIN_FIELD_BASE(191, 191, 13, 0x00c0, 0x10, 18, 1),
+ PIN_FIELD_BASE(192, 192, 13, 0x00c0, 0x10, 0, 1),
+ PIN_FIELD_BASE(193, 193, 13, 0x00c0, 0x10, 6, 1),
+ PIN_FIELD_BASE(194, 194, 13, 0x00c0, 0x10, 14, 1),
+ PIN_FIELD_BASE(195, 195, 13, 0x00c0, 0x10, 19, 1),
+ PIN_FIELD_BASE(196, 196, 13, 0x00c0, 0x10, 1, 1),
+ PIN_FIELD_BASE(197, 197, 13, 0x00c0, 0x10, 7, 1),
+ PIN_FIELD_BASE(198, 198, 13, 0x00c0, 0x10, 15, 1),
+ PIN_FIELD_BASE(199, 199, 13, 0x00c0, 0x10, 20, 1),
+ PIN_FIELD_BASE(200, 200, 13, 0x00c0, 0x10, 22, 1),
+ PIN_FIELD_BASE(201, 201, 13, 0x00c0, 0x10, 25, 1),
+ PIN_FIELD_BASE(202, 202, 13, 0x00c0, 0x10, 16, 1),
+ PIN_FIELD_BASE(203, 203, 13, 0x00c0, 0x10, 21, 1),
+ PIN_FIELD_BASE(204, 204, 13, 0x00c0, 0x10, 2, 1),
+ PIN_FIELD_BASE(205, 205, 13, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(206, 206, 13, 0x00c0, 0x10, 8, 1),
+ PIN_FIELD_BASE(207, 207, 13, 0x00c0, 0x10, 9, 1),
+ PIN_FIELD_BASE(208, 208, 13, 0x00c0, 0x10, 10, 1),
+ PIN_FIELD_BASE(209, 209, 13, 0x00c0, 0x10, 11, 1),
+ PIN_FIELD_BASE(210, 210, 14, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(211, 211, 14, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(212, 212, 14, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(213, 213, 14, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(214, 214, 13, 0x00c0, 0x10, 23, 1),
+ PIN_FIELD_BASE(215, 215, 13, 0x00c0, 0x10, 24, 1),
+ PIN_FIELD_BASE(216, 216, 14, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(220, 220, 14, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(221, 221, 14, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(222, 222, 14, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(223, 223, 14, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(230, 230, 15, 0x0070, 0x10, 13, 1),
+ PIN_FIELD_BASE(231, 231, 15, 0x0070, 0x10, 14, 1),
+ PIN_FIELD_BASE(232, 232, 15, 0x0070, 0x10, 10, 1),
+ PIN_FIELD_BASE(233, 233, 15, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(234, 234, 15, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(235, 235, 15, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(236, 236, 15, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(237, 237, 15, 0x0070, 0x10, 6, 1),
+ PIN_FIELD_BASE(238, 238, 15, 0x0070, 0x10, 5, 1),
+ PIN_FIELD_BASE(239, 239, 15, 0x0070, 0x10, 19, 1),
+ PIN_FIELD_BASE(240, 240, 15, 0x0070, 0x10, 18, 1),
+ PIN_FIELD_BASE(241, 241, 15, 0x0070, 0x10, 16, 1),
+ PIN_FIELD_BASE(242, 242, 15, 0x0070, 0x10, 17, 1),
+ PIN_FIELD_BASE(243, 243, 15, 0x0070, 0x10, 15, 1),
+ PIN_FIELD_BASE(244, 244, 15, 0x0070, 0x10, 12, 1),
+ PIN_FIELD_BASE(245, 245, 15, 0x0070, 0x10, 9, 1),
+ PIN_FIELD_BASE(246, 246, 15, 0x0070, 0x10, 8, 1),
+ PIN_FIELD_BASE(247, 247, 15, 0x0070, 0x10, 7, 1),
+ PIN_FIELD_BASE(248, 248, 15, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(249, 249, 15, 0x0070, 0x10, 20, 1),
+ PIN_FIELD_BASE(250, 250, 15, 0x0070, 0x10, 11, 1),
+ PIN_FIELD_BASE(251, 251, 3, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(252, 252, 3, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(253, 253, 3, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(254, 254, 3, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(255, 255, 3, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(256, 256, 3, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(257, 257, 3, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(258, 258, 3, 0x0080, 0x10, 9, 1),
+};
+
+static const struct mtk_pin_field_calc mt8196_pin_drv_range[] = {
+ PIN_FIELD_BASE(0, 0, 8, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(1, 1, 8, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(2, 2, 11, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(3, 3, 11, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(4, 4, 11, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(5, 5, 11, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(6, 6, 11, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(7, 7, 11, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(8, 8, 11, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(9, 9, 9, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(10, 10, 9, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(11, 11, 8, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(12, 12, 9, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(13, 13, 6, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(14, 14, 3, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(15, 15, 6, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(16, 16, 6, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(17, 17, 6, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(18, 18, 6, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(19, 19, 6, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(20, 20, 3, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(21, 21, 2, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(22, 22, 2, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(23, 23, 2, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(24, 24, 2, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(25, 25, 2, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(26, 26, 2, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(27, 27, 2, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(28, 28, 2, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(29, 29, 2, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(30, 30, 2, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(31, 31, 2, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(32, 32, 1, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(33, 33, 1, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(34, 34, 1, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(35, 35, 1, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(36, 36, 1, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(37, 37, 1, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(38, 38, 1, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(39, 39, 8, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(40, 40, 8, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(41, 41, 8, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(42, 42, 8, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(43, 43, 8, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(44, 44, 8, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(45, 45, 8, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(46, 46, 8, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(47, 47, 8, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(48, 48, 8, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(49, 49, 8, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(50, 50, 8, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(51, 51, 8, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(52, 52, 9, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(53, 53, 9, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(54, 54, 9, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(55, 55, 9, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(56, 56, 9, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(57, 57, 9, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(58, 58, 9, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(59, 59, 9, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(60, 60, 9, 0x0010, 0x10, 24, 3),
+ PIN_FIELD_BASE(61, 61, 9, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(62, 62, 9, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(63, 63, 9, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(64, 64, 9, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(65, 65, 9, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(66, 66, 9, 0x0010, 0x10, 27, 3),
+ PIN_FIELD_BASE(67, 67, 9, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(68, 68, 9, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(69, 69, 9, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(70, 70, 9, 0x0010, 0x10, 18, 3),
+ PIN_FIELD_BASE(71, 71, 9, 0x0010, 0x10, 18, 3),
+ PIN_FIELD_BASE(72, 72, 9, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(73, 73, 9, 0x0010, 0x10, 18, 3),
+ PIN_FIELD_BASE(74, 74, 9, 0x0010, 0x10, 18, 3),
+ PIN_FIELD_BASE(75, 75, 10, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(76, 76, 10, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(77, 77, 10, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(78, 78, 10, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(79, 79, 10, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(80, 80, 10, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(81, 81, 11, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(82, 82, 11, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(83, 83, 11, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(84, 84, 11, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(85, 85, 11, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(86, 86, 11, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(87, 87, 11, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(88, 88, 11, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(89, 89, 11, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(90, 90, 11, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(91, 91, 12, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(92, 92, 12, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(93, 93, 12, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(94, 94, 12, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(95, 95, 12, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(96, 96, 12, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(97, 97, 12, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(98, 98, 12, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(99, 99, 12, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(100, 100, 12, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(101, 101, 12, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(102, 102, 12, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(103, 103, 12, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(104, 104, 12, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(105, 105, 12, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(106, 106, 5, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(107, 107, 5, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(108, 108, 5, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(109, 109, 5, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(110, 110, 5, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(111, 111, 5, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(112, 112, 5, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(113, 113, 5, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(114, 114, 5, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(115, 115, 5, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(116, 116, 5, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(117, 117, 5, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(118, 118, 6, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(119, 119, 6, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(120, 120, 6, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(121, 121, 6, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(122, 122, 6, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(123, 123, 6, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(124, 124, 6, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(125, 125, 7, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(126, 126, 7, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(127, 127, 7, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(128, 128, 7, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(129, 129, 7, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(130, 130, 7, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(131, 131, 7, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(132, 132, 7, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(133, 133, 7, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(134, 134, 7, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(135, 135, 7, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(136, 136, 7, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(137, 137, 4, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(138, 138, 4, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(139, 139, 4, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(140, 140, 4, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(141, 141, 4, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(142, 142, 4, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(143, 143, 4, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(144, 144, 4, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(145, 145, 4, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(146, 146, 4, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(147, 147, 4, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(148, 148, 4, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(149, 149, 4, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(150, 150, 4, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(151, 151, 4, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(152, 152, 4, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(153, 153, 4, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(154, 154, 4, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(155, 155, 4, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(156, 156, 4, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(157, 157, 2, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(158, 158, 2, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(159, 159, 2, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(160, 160, 3, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(161, 161, 3, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(162, 162, 3, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(163, 163, 3, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(164, 164, 3, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(165, 165, 3, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(166, 166, 3, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(167, 167, 3, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(168, 168, 3, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(169, 169, 3, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(170, 170, 3, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(171, 171, 3, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(172, 172, 3, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(173, 173, 3, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(174, 174, 1, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(175, 175, 1, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(176, 176, 1, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(177, 177, 1, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(178, 178, 1, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(179, 179, 1, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(180, 180, 1, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(181, 181, 1, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(182, 182, 1, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(183, 183, 1, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(184, 184, 1, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(185, 185, 1, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(186, 186, 13, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(187, 187, 13, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(188, 188, 13, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(189, 189, 13, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(190, 190, 13, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(191, 191, 13, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(192, 192, 13, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(193, 193, 13, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(194, 194, 13, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(195, 195, 13, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(196, 196, 13, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(197, 197, 13, 0x0010, 0x10, 18, 3),
+ PIN_FIELD_BASE(198, 198, 13, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(199, 199, 13, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(200, 200, 13, 0x0010, 0x10, 27, 3),
+ PIN_FIELD_BASE(201, 201, 13, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(202, 202, 13, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(203, 203, 13, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(204, 204, 13, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(205, 205, 13, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(206, 206, 13, 0x0010, 0x10, 24, 3),
+ PIN_FIELD_BASE(207, 207, 13, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(208, 208, 13, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(209, 209, 13, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(210, 210, 14, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(211, 211, 14, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(212, 212, 14, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(213, 213, 14, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(214, 214, 13, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(215, 215, 13, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(216, 216, 14, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(217, 217, 14, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(218, 218, 14, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(219, 219, 14, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(220, 220, 14, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(221, 221, 14, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(222, 222, 14, 0x0020, 0x10, 12, 3),
+ PIN_FIELD_BASE(223, 223, 14, 0x0020, 0x10, 9, 3),
+ PIN_FIELD_BASE(224, 224, 14, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(225, 225, 14, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(226, 226, 14, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(227, 227, 14, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(228, 228, 14, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(229, 229, 14, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(230, 230, 15, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(231, 231, 15, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(232, 232, 15, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(233, 233, 15, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(234, 234, 15, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(235, 235, 15, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(236, 236, 15, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(237, 237, 15, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(238, 238, 15, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(239, 239, 15, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(240, 240, 15, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(241, 241, 15, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(242, 242, 15, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(243, 243, 15, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(244, 244, 15, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(245, 245, 15, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(246, 246, 15, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(247, 247, 15, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(248, 248, 15, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(249, 249, 15, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(250, 250, 15, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(251, 251, 3, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(252, 252, 3, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(253, 253, 3, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(254, 254, 3, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(255, 255, 3, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(256, 256, 3, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(257, 257, 3, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(258, 258, 3, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(259, 259, 14, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(260, 260, 14, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(261, 261, 14, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(262, 262, 14, 0x0010, 0x10, 18, 3),
+ PIN_FIELD_BASE(263, 263, 14, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(264, 264, 14, 0x0010, 0x10, 24, 3),
+ PIN_FIELD_BASE(265, 265, 14, 0x0010, 0x10, 27, 3),
+ PIN_FIELD_BASE(266, 266, 14, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(267, 267, 15, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(268, 268, 15, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(269, 269, 15, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(270, 270, 15, 0x0000, 0x10, 21, 3),
+};
+
+static const struct mtk_pin_field_calc mt8196_pin_drv_adv_range[] = {
+ PIN_FIELD_BASE(46, 46, 8, 0x0030, 0x10, 0, 3),
+ PIN_FIELD_BASE(47, 47, 8, 0x0030, 0x10, 9, 3),
+ PIN_FIELD_BASE(48, 48, 8, 0x0030, 0x10, 3, 3),
+ PIN_FIELD_BASE(49, 49, 8, 0x0030, 0x10, 12, 3),
+ PIN_FIELD_BASE(50, 50, 8, 0x0030, 0x10, 6, 3),
+ PIN_FIELD_BASE(51, 51, 8, 0x0030, 0x10, 15, 3),
+ PIN_FIELD_BASE(52, 52, 9, 0x0030, 0x10, 0, 3),
+ PIN_FIELD_BASE(53, 53, 9, 0x0030, 0x10, 3, 3),
+ PIN_FIELD_BASE(75, 75, 10, 0x0020, 0x10, 0, 5),
+ PIN_FIELD_BASE(76, 76, 10, 0x0020, 0x10, 5, 5),
+ PIN_FIELD_BASE(77, 77, 10, 0x0020, 0x10, 10, 5),
+ PIN_FIELD_BASE(78, 78, 10, 0x0020, 0x10, 15, 5),
+ PIN_FIELD_BASE(99, 99, 12, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(100, 100, 12, 0x0020, 0x10, 9, 3),
+ PIN_FIELD_BASE(101, 101, 12, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(102, 102, 12, 0x0020, 0x10, 12, 3),
+ PIN_FIELD_BASE(104, 104, 12, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(105, 105, 12, 0x0020, 0x10, 15, 3),
+ PIN_FIELD_BASE(123, 123, 6, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(124, 124, 6, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(164, 164, 3, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(165, 165, 3, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(166, 166, 3, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(167, 167, 3, 0x0020, 0x10, 9, 3),
+ PIN_FIELD_BASE(168, 168, 3, 0x0020, 0x10, 12, 3),
+ PIN_FIELD_BASE(170, 170, 3, 0x0020, 0x10, 15, 3),
+ PIN_FIELD_BASE(176, 176, 1, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(177, 177, 1, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(188, 188, 13, 0x0040, 0x10, 0, 3),
+ PIN_FIELD_BASE(189, 189, 13, 0x0040, 0x10, 15, 3),
+ PIN_FIELD_BASE(190, 190, 13, 0x0040, 0x10, 3, 3),
+ PIN_FIELD_BASE(191, 191, 13, 0x0040, 0x10, 18, 3),
+ PIN_FIELD_BASE(194, 194, 13, 0x0040, 0x10, 6, 3),
+ PIN_FIELD_BASE(195, 195, 13, 0x0040, 0x10, 21, 3),
+ PIN_FIELD_BASE(198, 198, 13, 0x0040, 0x10, 9, 3),
+ PIN_FIELD_BASE(199, 199, 13, 0x0040, 0x10, 24, 3),
+ PIN_FIELD_BASE(200, 200, 13, 0x0050, 0x10, 0, 3),
+ PIN_FIELD_BASE(201, 201, 13, 0x0050, 0x10, 9, 3),
+ PIN_FIELD_BASE(202, 202, 13, 0x0040, 0x10, 12, 3),
+ PIN_FIELD_BASE(203, 203, 13, 0x0040, 0x10, 27, 3),
+ PIN_FIELD_BASE(214, 214, 13, 0x0050, 0x10, 3, 3),
+ PIN_FIELD_BASE(215, 215, 13, 0x0050, 0x10, 6, 3),
+};
+
+static const struct mtk_pin_field_calc mt8196_pin_rsel_range[] = {
+ PIN_FIELD_BASE(46, 46, 8, 0x00c0, 0x10, 0, 3),
+ PIN_FIELD_BASE(47, 47, 8, 0x00c0, 0x10, 9, 3),
+ PIN_FIELD_BASE(48, 48, 8, 0x00c0, 0x10, 3, 3),
+ PIN_FIELD_BASE(49, 49, 8, 0x00c0, 0x10, 12, 3),
+ PIN_FIELD_BASE(50, 50, 8, 0x00c0, 0x10, 6, 3),
+ PIN_FIELD_BASE(51, 51, 8, 0x00c0, 0x10, 15, 3),
+ PIN_FIELD_BASE(52, 52, 9, 0x0110, 0x10, 0, 3),
+ PIN_FIELD_BASE(53, 53, 9, 0x0110, 0x10, 3, 3),
+ PIN_FIELD_BASE(99, 99, 12, 0x00b0, 0x10, 0, 3),
+ PIN_FIELD_BASE(100, 100, 12, 0x00b0, 0x10, 9, 3),
+ PIN_FIELD_BASE(101, 101, 12, 0x00b0, 0x10, 3, 3),
+ PIN_FIELD_BASE(102, 102, 12, 0x00b0, 0x10, 12, 3),
+ PIN_FIELD_BASE(104, 104, 12, 0x00b0, 0x10, 6, 3),
+ PIN_FIELD_BASE(105, 105, 12, 0x00b0, 0x10, 15, 3),
+ PIN_FIELD_BASE(123, 123, 6, 0x0100, 0x10, 0, 3),
+ PIN_FIELD_BASE(124, 124, 6, 0x0100, 0x10, 3, 3),
+ PIN_FIELD_BASE(164, 164, 3, 0x00b0, 0x10, 0, 3),
+ PIN_FIELD_BASE(165, 165, 3, 0x00b0, 0x10, 6, 3),
+ PIN_FIELD_BASE(166, 166, 3, 0x00b0, 0x10, 3, 3),
+ PIN_FIELD_BASE(167, 167, 3, 0x00b0, 0x10, 9, 3),
+ PIN_FIELD_BASE(168, 168, 3, 0x00b0, 0x10, 12, 3),
+ PIN_FIELD_BASE(170, 170, 3, 0x00b0, 0x10, 15, 3),
+ PIN_FIELD_BASE(176, 176, 1, 0x00b0, 0x10, 0, 3),
+ PIN_FIELD_BASE(177, 177, 1, 0x00b0, 0x10, 3, 3),
+ PIN_FIELD_BASE(188, 188, 13, 0x00f0, 0x10, 0, 3),
+ PIN_FIELD_BASE(189, 189, 13, 0x00f0, 0x10, 15, 3),
+ PIN_FIELD_BASE(190, 190, 13, 0x00f0, 0x10, 3, 3),
+ PIN_FIELD_BASE(191, 191, 13, 0x00f0, 0x10, 18, 3),
+ PIN_FIELD_BASE(194, 194, 13, 0x00f0, 0x10, 6, 3),
+ PIN_FIELD_BASE(195, 195, 13, 0x00f0, 0x10, 21, 3),
+ PIN_FIELD_BASE(198, 198, 13, 0x00f0, 0x10, 9, 3),
+ PIN_FIELD_BASE(199, 199, 13, 0x00f0, 0x10, 24, 3),
+ PIN_FIELD_BASE(200, 200, 13, 0x0100, 0x10, 0, 3),
+ PIN_FIELD_BASE(201, 201, 13, 0x0100, 0x10, 9, 3),
+ PIN_FIELD_BASE(202, 202, 13, 0x00f0, 0x10, 12, 3),
+ PIN_FIELD_BASE(203, 203, 13, 0x00f0, 0x10, 27, 3),
+ PIN_FIELD_BASE(214, 214, 13, 0x0100, 0x10, 3, 3),
+ PIN_FIELD_BASE(215, 215, 13, 0x0100, 0x10, 6, 3),
+};
+
+static const struct mtk_pin_rsel mt8196_pin_rsel_val_range[] = {
+ PIN_RSEL(46, 53, 0x0, 75000, 75000),
+ PIN_RSEL(46, 53, 0x1, 10000, 5000),
+ PIN_RSEL(46, 53, 0x2, 5000, 75000),
+ PIN_RSEL(46, 53, 0x3, 4000, 5000),
+ PIN_RSEL(46, 53, 0x4, 3000, 75000),
+ PIN_RSEL(46, 53, 0x5, 2000, 5000),
+ PIN_RSEL(46, 53, 0x6, 1500, 75000),
+ PIN_RSEL(46, 53, 0x7, 1000, 5000),
+ PIN_RSEL(99, 102, 0x0, 75000, 75000),
+ PIN_RSEL(99, 102, 0x1, 10000, 5000),
+ PIN_RSEL(99, 102, 0x2, 5000, 75000),
+ PIN_RSEL(99, 102, 0x3, 4000, 5000),
+ PIN_RSEL(99, 102, 0x4, 3000, 75000),
+ PIN_RSEL(99, 102, 0x5, 2000, 5000),
+ PIN_RSEL(99, 102, 0x6, 1500, 75000),
+ PIN_RSEL(99, 102, 0x7, 1000, 5000),
+ PIN_RSEL(104, 105, 0x0, 75000, 75000),
+ PIN_RSEL(104, 105, 0x1, 10000, 5000),
+ PIN_RSEL(104, 105, 0x2, 5000, 75000),
+ PIN_RSEL(104, 105, 0x3, 4000, 5000),
+ PIN_RSEL(104, 105, 0x4, 3000, 75000),
+ PIN_RSEL(104, 105, 0x5, 2000, 5000),
+ PIN_RSEL(104, 105, 0x6, 1500, 75000),
+ PIN_RSEL(104, 105, 0x7, 1000, 5000),
+ PIN_RSEL(123, 124, 0x0, 75000, 75000),
+ PIN_RSEL(123, 124, 0x1, 10000, 5000),
+ PIN_RSEL(123, 124, 0x2, 5000, 75000),
+ PIN_RSEL(123, 124, 0x3, 4000, 5000),
+ PIN_RSEL(123, 124, 0x4, 3000, 75000),
+ PIN_RSEL(123, 124, 0x5, 2000, 5000),
+ PIN_RSEL(123, 124, 0x6, 1500, 75000),
+ PIN_RSEL(123, 124, 0x7, 1000, 5000),
+ PIN_RSEL(164, 168, 0x0, 75000, 75000),
+ PIN_RSEL(164, 168, 0x1, 10000, 5000),
+ PIN_RSEL(164, 168, 0x2, 5000, 75000),
+ PIN_RSEL(164, 168, 0x3, 4000, 5000),
+ PIN_RSEL(164, 168, 0x4, 3000, 75000),
+ PIN_RSEL(164, 168, 0x5, 2000, 5000),
+ PIN_RSEL(164, 168, 0x6, 1500, 75000),
+ PIN_RSEL(164, 168, 0x7, 1000, 5000),
+ PIN_RSEL(170, 170, 0x0, 75000, 75000),
+ PIN_RSEL(170, 170, 0x1, 10000, 5000),
+ PIN_RSEL(170, 170, 0x2, 5000, 75000),
+ PIN_RSEL(170, 170, 0x3, 4000, 5000),
+ PIN_RSEL(170, 170, 0x4, 3000, 75000),
+ PIN_RSEL(170, 170, 0x5, 2000, 5000),
+ PIN_RSEL(170, 170, 0x6, 1500, 75000),
+ PIN_RSEL(170, 170, 0x7, 1000, 5000),
+ PIN_RSEL(176, 177, 0x0, 75000, 75000),
+ PIN_RSEL(176, 177, 0x1, 10000, 5000),
+ PIN_RSEL(176, 177, 0x2, 5000, 75000),
+ PIN_RSEL(176, 177, 0x3, 4000, 5000),
+ PIN_RSEL(176, 177, 0x4, 3000, 75000),
+ PIN_RSEL(176, 177, 0x5, 2000, 5000),
+ PIN_RSEL(176, 177, 0x6, 1500, 75000),
+ PIN_RSEL(176, 177, 0x7, 1000, 5000),
+ PIN_RSEL(188, 191, 0x0, 75000, 75000),
+ PIN_RSEL(188, 191, 0x1, 10000, 5000),
+ PIN_RSEL(188, 191, 0x2, 5000, 75000),
+ PIN_RSEL(188, 191, 0x3, 4000, 5000),
+ PIN_RSEL(188, 191, 0x4, 3000, 75000),
+ PIN_RSEL(188, 191, 0x5, 2000, 5000),
+ PIN_RSEL(188, 191, 0x6, 1500, 75000),
+ PIN_RSEL(188, 191, 0x7, 1000, 5000),
+ PIN_RSEL(194, 195, 0x0, 75000, 75000),
+ PIN_RSEL(194, 195, 0x1, 10000, 5000),
+ PIN_RSEL(194, 195, 0x2, 5000, 75000),
+ PIN_RSEL(194, 195, 0x3, 4000, 5000),
+ PIN_RSEL(194, 195, 0x4, 3000, 75000),
+ PIN_RSEL(194, 195, 0x5, 2000, 5000),
+ PIN_RSEL(194, 195, 0x6, 1500, 75000),
+ PIN_RSEL(194, 195, 0x7, 1000, 5000),
+ PIN_RSEL(198, 203, 0x0, 75000, 75000),
+ PIN_RSEL(198, 203, 0x1, 10000, 5000),
+ PIN_RSEL(198, 203, 0x2, 5000, 75000),
+ PIN_RSEL(198, 203, 0x3, 4000, 5000),
+ PIN_RSEL(198, 203, 0x4, 3000, 75000),
+ PIN_RSEL(198, 203, 0x5, 2000, 5000),
+ PIN_RSEL(198, 203, 0x6, 1500, 75000),
+ PIN_RSEL(198, 203, 0x7, 1000, 5000),
+ PIN_RSEL(214, 215, 0x0, 75000, 75000),
+ PIN_RSEL(214, 215, 0x1, 10000, 5000),
+ PIN_RSEL(214, 215, 0x2, 5000, 75000),
+ PIN_RSEL(214, 215, 0x3, 4000, 5000),
+ PIN_RSEL(214, 215, 0x4, 3000, 75000),
+ PIN_RSEL(214, 215, 0x5, 2000, 5000),
+ PIN_RSEL(214, 215, 0x6, 1500, 75000),
+ PIN_RSEL(214, 215, 0x7, 1000, 5000),
+};
+
+static const unsigned int mt8196_pull_type[] = {
+ MTK_PULL_PU_PD_TYPE,/*0*/ MTK_PULL_PU_PD_TYPE,/*1*/
+ MTK_PULL_PU_PD_TYPE,/*2*/ MTK_PULL_PU_PD_TYPE,/*3*/
+ MTK_PULL_PU_PD_TYPE,/*4*/ MTK_PULL_PU_PD_TYPE,/*5*/
+ MTK_PULL_PU_PD_TYPE,/*6*/ MTK_PULL_PU_PD_TYPE,/*7*/
+ MTK_PULL_PU_PD_TYPE,/*8*/ MTK_PULL_PU_PD_TYPE,/*9*/
+ MTK_PULL_PU_PD_TYPE,/*10*/ MTK_PULL_PU_PD_TYPE,/*11*/
+ MTK_PULL_PU_PD_TYPE,/*12*/ MTK_PULL_PU_PD_TYPE,/*13*/
+ MTK_PULL_PU_PD_TYPE,/*14*/ MTK_PULL_PU_PD_TYPE,/*15*/
+ MTK_PULL_PU_PD_TYPE,/*16*/ MTK_PULL_PU_PD_TYPE,/*17*/
+ MTK_PULL_PU_PD_TYPE,/*18*/ MTK_PULL_PU_PD_TYPE,/*19*/
+ MTK_PULL_PU_PD_TYPE,/*20*/ MTK_PULL_PU_PD_TYPE,/*21*/
+ MTK_PULL_PU_PD_TYPE,/*22*/ MTK_PULL_PU_PD_TYPE,/*23*/
+ MTK_PULL_PU_PD_TYPE,/*24*/ MTK_PULL_PU_PD_TYPE,/*25*/
+ MTK_PULL_PU_PD_TYPE,/*26*/ MTK_PULL_PU_PD_TYPE,/*27*/
+ MTK_PULL_PU_PD_TYPE,/*28*/ MTK_PULL_PU_PD_TYPE,/*29*/
+ MTK_PULL_PU_PD_TYPE,/*30*/ MTK_PULL_PU_PD_TYPE,/*31*/
+ MTK_PULL_PU_PD_TYPE,/*32*/ MTK_PULL_PU_PD_TYPE,/*33*/
+ MTK_PULL_PU_PD_TYPE,/*34*/ MTK_PULL_PU_PD_TYPE,/*35*/
+ MTK_PULL_PU_PD_TYPE,/*36*/ MTK_PULL_PU_PD_TYPE,/*37*/
+ MTK_PULL_PU_PD_TYPE,/*38*/ MTK_PULL_PU_PD_TYPE,/*39*/
+ MTK_PULL_PU_PD_TYPE,/*40*/ MTK_PULL_PU_PD_TYPE,/*41*/
+ MTK_PULL_PU_PD_TYPE,/*42*/ MTK_PULL_PU_PD_TYPE,/*43*/
+ MTK_PULL_PU_PD_TYPE,/*44*/ MTK_PULL_PU_PD_TYPE,/*45*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*46*/ MTK_PULL_PU_PD_RSEL_TYPE,/*47*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*48*/ MTK_PULL_PU_PD_RSEL_TYPE,/*49*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*50*/ MTK_PULL_PU_PD_RSEL_TYPE,/*51*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*52*/ MTK_PULL_PU_PD_RSEL_TYPE,/*53*/
+ MTK_PULL_PU_PD_TYPE,/*54*/ MTK_PULL_PU_PD_TYPE,/*55*/
+ MTK_PULL_PU_PD_TYPE,/*56*/ MTK_PULL_PU_PD_TYPE,/*57*/
+ MTK_PULL_PU_PD_TYPE,/*58*/ MTK_PULL_PU_PD_TYPE,/*59*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*60*/ MTK_PULL_PU_PD_TYPE,/*61*/
+ MTK_PULL_PU_PD_TYPE,/*62*/ MTK_PULL_PU_PD_TYPE,/*63*/
+ MTK_PULL_PU_PD_TYPE,/*64*/ MTK_PULL_PU_PD_TYPE,/*65*/
+ MTK_PULL_PU_PD_TYPE,/*66*/ MTK_PULL_PU_PD_TYPE,/*67*/
+ MTK_PULL_PU_PD_TYPE,/*68*/ MTK_PULL_PU_PD_TYPE,/*69*/
+ MTK_PULL_PU_PD_TYPE,/*70*/ MTK_PULL_PU_PD_TYPE,/*71*/
+ MTK_PULL_PU_PD_TYPE,/*72*/ MTK_PULL_PU_PD_TYPE,/*73*/
+ MTK_PULL_PU_PD_TYPE,/*74*/ MTK_PULL_PU_PD_TYPE,/*75*/
+ MTK_PULL_PU_PD_TYPE,/*76*/ MTK_PULL_PU_PD_TYPE,/*77*/
+ MTK_PULL_PU_PD_TYPE,/*78*/ MTK_PULL_PU_PD_TYPE,/*79*/
+ MTK_PULL_PU_PD_TYPE,/*80*/ MTK_PULL_PU_PD_TYPE,/*81*/
+ MTK_PULL_PU_PD_TYPE,/*82*/ MTK_PULL_PU_PD_TYPE,/*83*/
+ MTK_PULL_PU_PD_TYPE,/*84*/ MTK_PULL_PU_PD_TYPE,/*85*/
+ MTK_PULL_PU_PD_TYPE,/*86*/ MTK_PULL_PU_PD_TYPE,/*87*/
+ MTK_PULL_PU_PD_TYPE,/*88*/ MTK_PULL_PU_PD_TYPE,/*89*/
+ MTK_PULL_PU_PD_TYPE,/*90*/ MTK_PULL_PU_PD_TYPE,/*91*/
+ MTK_PULL_PU_PD_TYPE,/*92*/ MTK_PULL_PU_PD_TYPE,/*93*/
+ MTK_PULL_PU_PD_TYPE,/*94*/ MTK_PULL_PU_PD_TYPE,/*95*/
+ MTK_PULL_PU_PD_TYPE,/*96*/ MTK_PULL_PU_PD_TYPE,/*97*/
+ MTK_PULL_PU_PD_TYPE,/*98*/ MTK_PULL_PU_PD_RSEL_TYPE,/*99*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*100*/ MTK_PULL_PU_PD_RSEL_TYPE,/*101*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*102*/ MTK_PULL_PU_PD_TYPE,/*103*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*104*/ MTK_PULL_PU_PD_RSEL_TYPE,/*105*/
+ MTK_PULL_PU_PD_TYPE,/*106*/ MTK_PULL_PU_PD_TYPE,/*107*/
+ MTK_PULL_PU_PD_TYPE,/*108*/ MTK_PULL_PU_PD_TYPE,/*109*/
+ MTK_PULL_PU_PD_TYPE,/*110*/ MTK_PULL_PU_PD_TYPE,/*111*/
+ MTK_PULL_PU_PD_TYPE,/*112*/ MTK_PULL_PU_PD_TYPE,/*113*/
+ MTK_PULL_PU_PD_TYPE,/*114*/ MTK_PULL_PU_PD_TYPE,/*115*/
+ MTK_PULL_PU_PD_TYPE,/*116*/ MTK_PULL_PU_PD_TYPE,/*117*/
+ MTK_PULL_PU_PD_TYPE,/*118*/ MTK_PULL_PU_PD_TYPE,/*119*/
+ MTK_PULL_PU_PD_TYPE,/*120*/ MTK_PULL_PU_PD_TYPE,/*121*/
+ MTK_PULL_PU_PD_TYPE,/*122*/ MTK_PULL_PU_PD_RSEL_TYPE,/*123*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*124*/ MTK_PULL_PUPD_R1R0_TYPE,/*125*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*126*/ MTK_PULL_PUPD_R1R0_TYPE,/*127*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*128*/ MTK_PULL_PUPD_R1R0_TYPE,/*129*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*130*/ MTK_PULL_PUPD_R1R0_TYPE,/*131*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*132*/ MTK_PULL_PUPD_R1R0_TYPE,/*133*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*134*/ MTK_PULL_PUPD_R1R0_TYPE,/*135*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*136*/ MTK_PULL_PUPD_R1R0_TYPE,/*137*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*138*/ MTK_PULL_PUPD_R1R0_TYPE,/*139*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*140*/ MTK_PULL_PUPD_R1R0_TYPE,/*141*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*142*/ MTK_PULL_PUPD_R1R0_TYPE,/*143*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*144*/ MTK_PULL_PUPD_R1R0_TYPE,/*145*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*146*/ MTK_PULL_PUPD_R1R0_TYPE,/*147*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*148*/ MTK_PULL_PUPD_R1R0_TYPE,/*149*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*150*/ MTK_PULL_PUPD_R1R0_TYPE,/*151*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*152*/ MTK_PULL_PUPD_R1R0_TYPE,/*153*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*154*/ MTK_PULL_PUPD_R1R0_TYPE,/*155*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*156*/ MTK_PULL_PU_PD_TYPE,/*157*/
+ MTK_PULL_PU_PD_TYPE,/*158*/ MTK_PULL_PU_PD_TYPE,/*159*/
+ MTK_PULL_PU_PD_TYPE,/*160*/ MTK_PULL_PU_PD_TYPE,/*161*/
+ MTK_PULL_PU_PD_TYPE,/*162*/ MTK_PULL_PU_PD_TYPE,/*163*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*164*/ MTK_PULL_PU_PD_RSEL_TYPE,/*165*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*166*/ MTK_PULL_PU_PD_RSEL_TYPE,/*167*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*168*/ MTK_PULL_PU_PD_TYPE,/*169*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*170*/ MTK_PULL_PU_PD_TYPE,/*171*/
+ MTK_PULL_PU_PD_TYPE,/*172*/ MTK_PULL_PU_PD_TYPE,/*173*/
+ MTK_PULL_PU_PD_TYPE,/*174*/ MTK_PULL_PU_PD_TYPE,/*175*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*176*/ MTK_PULL_PU_PD_RSEL_TYPE,/*177*/
+ MTK_PULL_PU_PD_TYPE,/*178*/ MTK_PULL_PU_PD_TYPE,/*179*/
+ MTK_PULL_PU_PD_TYPE,/*180*/ MTK_PULL_PU_PD_TYPE,/*181*/
+ MTK_PULL_PU_PD_TYPE,/*182*/ MTK_PULL_PU_PD_TYPE,/*183*/
+ MTK_PULL_PU_PD_TYPE,/*184*/ MTK_PULL_PU_PD_TYPE,/*185*/
+ MTK_PULL_PU_PD_TYPE,/*186*/ MTK_PULL_PU_PD_TYPE,/*187*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*188*/ MTK_PULL_PU_PD_RSEL_TYPE,/*189*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*190*/ MTK_PULL_PU_PD_RSEL_TYPE,/*191*/
+ MTK_PULL_PU_PD_TYPE,/*192*/ MTK_PULL_PU_PD_TYPE,/*193*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*194*/ MTK_PULL_PU_PD_RSEL_TYPE,/*195*/
+ MTK_PULL_PU_PD_TYPE,/*196*/ MTK_PULL_PU_PD_TYPE,/*197*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*198*/ MTK_PULL_PU_PD_RSEL_TYPE,/*199*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*200*/ MTK_PULL_PU_PD_RSEL_TYPE,/*201*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*202*/ MTK_PULL_PU_PD_RSEL_TYPE,/*203*/
+ MTK_PULL_PU_PD_TYPE,/*204*/ MTK_PULL_PU_PD_TYPE,/*205*/
+ MTK_PULL_PU_PD_TYPE,/*206*/ MTK_PULL_PU_PD_TYPE,/*207*/
+ MTK_PULL_PU_PD_TYPE,/*208*/ MTK_PULL_PU_PD_TYPE,/*209*/
+ MTK_PULL_PU_PD_TYPE,/*210*/ MTK_PULL_PU_PD_TYPE,/*211*/
+ MTK_PULL_PU_PD_TYPE,/*212*/ MTK_PULL_PU_PD_TYPE,/*213*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*214*/ MTK_PULL_PU_PD_RSEL_TYPE,/*215*/
+ MTK_PULL_PU_PD_TYPE,/*216*/ MTK_PULL_PUPD_R1R0_TYPE,/*217*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*218*/ MTK_PULL_PUPD_R1R0_TYPE,/*219*/
+ MTK_PULL_PU_PD_TYPE,/*220*/ MTK_PULL_PU_PD_TYPE,/*221*/
+ MTK_PULL_PU_PD_TYPE,/*222*/ MTK_PULL_PU_PD_TYPE,/*223*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*224*/ MTK_PULL_PUPD_R1R0_TYPE,/*225*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*226*/ MTK_PULL_PUPD_R1R0_TYPE,/*227*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*228*/ MTK_PULL_PUPD_R1R0_TYPE,/*229*/
+ MTK_PULL_PU_PD_TYPE,/*230*/ MTK_PULL_PU_PD_TYPE,/*231*/
+ MTK_PULL_PU_PD_TYPE,/*232*/ MTK_PULL_PU_PD_TYPE,/*233*/
+ MTK_PULL_PU_PD_TYPE,/*234*/ MTK_PULL_PU_PD_TYPE,/*235*/
+ MTK_PULL_PU_PD_TYPE,/*236*/ MTK_PULL_PU_PD_TYPE,/*237*/
+ MTK_PULL_PU_PD_TYPE,/*238*/ MTK_PULL_PU_PD_TYPE,/*239*/
+ MTK_PULL_PU_PD_TYPE,/*240*/ MTK_PULL_PU_PD_TYPE,/*241*/
+ MTK_PULL_PU_PD_TYPE,/*242*/ MTK_PULL_PU_PD_TYPE,/*243*/
+ MTK_PULL_PU_PD_TYPE,/*244*/ MTK_PULL_PU_PD_TYPE,/*245*/
+ MTK_PULL_PU_PD_TYPE,/*246*/ MTK_PULL_PU_PD_TYPE,/*247*/
+ MTK_PULL_PU_PD_TYPE,/*248*/ MTK_PULL_PU_PD_TYPE,/*249*/
+ MTK_PULL_PU_PD_TYPE,/*250*/ MTK_PULL_PU_PD_TYPE,/*251*/
+ MTK_PULL_PU_PD_TYPE,/*252*/ MTK_PULL_PU_PD_TYPE,/*253*/
+ MTK_PULL_PU_PD_TYPE,/*254*/ MTK_PULL_PU_PD_TYPE,/*255*/
+ MTK_PULL_PU_PD_TYPE,/*256*/ MTK_PULL_PU_PD_TYPE,/*257*/
+ MTK_PULL_PU_PD_TYPE,/*258*/ MTK_PULL_PUPD_R1R0_TYPE,/*259*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*260*/ MTK_PULL_PUPD_R1R0_TYPE,/*261*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*262*/ MTK_PULL_PUPD_R1R0_TYPE,/*263*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*264*/ MTK_PULL_PUPD_R1R0_TYPE,/*265*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*266*/ MTK_PULL_PUPD_R1R0_TYPE,/*267*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*268*/ MTK_PULL_PUPD_R1R0_TYPE,/*269*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*270*/
+};
+
+static const struct mtk_pin_reg_calc mt8196_reg_cals[PINCTRL_PIN_REG_MAX] = {
+ [PINCTRL_PIN_REG_MODE] = MTK_RANGE(mt8196_pin_mode_range),
+ [PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt8196_pin_dir_range),
+ [PINCTRL_PIN_REG_DI] = MTK_RANGE(mt8196_pin_di_range),
+ [PINCTRL_PIN_REG_DO] = MTK_RANGE(mt8196_pin_do_range),
+ [PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt8196_pin_smt_range),
+ [PINCTRL_PIN_REG_IES] = MTK_RANGE(mt8196_pin_ies_range),
+ [PINCTRL_PIN_REG_PUPD] = MTK_RANGE(mt8196_pin_pupd_range),
+ [PINCTRL_PIN_REG_R0] = MTK_RANGE(mt8196_pin_r0_range),
+ [PINCTRL_PIN_REG_R1] = MTK_RANGE(mt8196_pin_r1_range),
+ [PINCTRL_PIN_REG_PU] = MTK_RANGE(mt8196_pin_pu_range),
+ [PINCTRL_PIN_REG_PD] = MTK_RANGE(mt8196_pin_pd_range),
+ [PINCTRL_PIN_REG_DRV] = MTK_RANGE(mt8196_pin_drv_range),
+ [PINCTRL_PIN_REG_DRV_ADV] = MTK_RANGE(mt8196_pin_drv_adv_range),
+ [PINCTRL_PIN_REG_RSEL] = MTK_RANGE(mt8196_pin_rsel_range),
+};
+
+static const char * const mt8196_pinctrl_register_base_names[] = {
+ "iocfg0", "iocfg_rt", "iocfg_rm1", "iocfg_rm2",
+ "iocfg_rb", "iocfg_bm1", "iocfg_bm2", "iocfg_bm3",
+ "iocfg_lt", "iocfg_lm1", "iocfg_lm2", "iocfg_lb1",
+ "iocfg_lb2", "iocfg_tm1", "iocfg_tm2", "iocfg_tm3",
+};
+
+static const struct mtk_eint_hw mt8196_eint_hw = {
+ .port_mask = 0xf,
+ .ports = 3,
+ .ap_num = 293,
+ .db_cnt = 32,
+ .db_time = debounce_time_mt6765,
+};
+
+static const struct mtk_pin_soc mt8196_data = {
+ .reg_cal = mt8196_reg_cals,
+ .pins = mtk_pins_mt8196,
+ .npins = ARRAY_SIZE(mtk_pins_mt8196),
+ .ngrps = ARRAY_SIZE(mtk_pins_mt8196),
+ .eint_hw = &mt8196_eint_hw,
+ .eint_pin = eint_pins_mt8196,
+ .nfuncs = 8,
+ .gpio_m = 0,
+ .base_names = mt8196_pinctrl_register_base_names,
+ .nbase_names = ARRAY_SIZE(mt8196_pinctrl_register_base_names),
+ .pull_type = mt8196_pull_type,
+ .pin_rsel = mt8196_pin_rsel_val_range,
+ .npin_rsel = ARRAY_SIZE(mt8196_pin_rsel_val_range),
+ .bias_set_combo = mtk_pinconf_bias_set_combo,
+ .bias_get_combo = mtk_pinconf_bias_get_combo,
+ .drive_set = mtk_pinconf_drive_set_rev1,
+ .drive_get = mtk_pinconf_drive_get_rev1,
+ .adv_drive_get = mtk_pinconf_adv_drive_get_raw,
+ .adv_drive_set = mtk_pinconf_adv_drive_set_raw,
+};
+
+static const struct of_device_id mt8196_pinctrl_of_match[] = {
+ { .compatible = "mediatek,mt8196-pinctrl", .data = &mt8196_data },
+ { /* sentinel */ }
+};
+
+static struct platform_driver mt8196_pinctrl_driver = {
+ .driver = {
+ .name = "mt8196-pinctrl",
+ .of_match_table = mt8196_pinctrl_of_match,
+ .pm = pm_sleep_ptr(&mtk_paris_pinctrl_pm_ops),
+ },
+ .probe = mtk_paris_pinctrl_probe,
+};
+
+static int __init mt8196_pinctrl_init(void)
+{
+ return platform_driver_register(&mt8196_pinctrl_driver);
+}
+arch_initcall(mt8196_pinctrl_init);
+
+MODULE_DESCRIPTION("MediaTek MT8196 Pinctrl Driver");
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
index d1556b75d9ef..4918d38abfc2 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
@@ -381,10 +381,13 @@ int mtk_build_eint(struct mtk_pinctrl *hw, struct platform_device *pdev)
return -ENOMEM;
count_reg_names = of_property_count_strings(np, "reg-names");
- if (count_reg_names < hw->soc->nbase_names)
+ if (count_reg_names < 0)
+ return -EINVAL;
+
+ hw->eint->nbase = count_reg_names - (int)hw->soc->nbase_names;
+ if (hw->eint->nbase <= 0)
return -EINVAL;
- hw->eint->nbase = count_reg_names - hw->soc->nbase_names;
hw->eint->base = devm_kmalloc_array(&pdev->dev, hw->eint->nbase,
sizeof(*hw->eint->base), GFP_KERNEL | __GFP_ZERO);
if (!hw->eint->base) {
@@ -416,7 +419,7 @@ int mtk_build_eint(struct mtk_pinctrl *hw, struct platform_device *pdev)
hw->eint->pctl = hw;
hw->eint->gpio_xlate = &mtk_eint_xt;
- ret = mtk_eint_do_init(hw->eint);
+ ret = mtk_eint_do_init(hw->eint, hw->soc->eint_pin);
if (ret)
goto err_free_eint;
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 8596f3541265..a4cb6d511fcd 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -86,7 +86,7 @@ static int mtk_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
return 0;
}
-static void mtk_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int mtk_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
unsigned int reg_addr;
unsigned int bit;
@@ -100,7 +100,7 @@ static void mtk_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
else
reg_addr = CLR_ADDR(reg_addr, pctl);
- regmap_write(mtk_get_regmap(pctl, offset), reg_addr, bit);
+ return regmap_write(mtk_get_regmap(pctl, offset), reg_addr, bit);
}
static int mtk_pconf_set_ies_smt(struct mtk_pinctrl *pctl, unsigned pin,
@@ -809,7 +809,12 @@ static const struct pinmux_ops mtk_pmx_ops = {
static int mtk_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
- mtk_gpio_set(chip, offset, value);
+ int ret;
+
+ ret = mtk_gpio_set(chip, offset, value);
+ if (ret)
+ return ret;
+
return pinctrl_gpio_direction_output(chip, offset);
}
@@ -893,7 +898,7 @@ static const struct gpio_chip mtk_gpio_chip = {
.direction_input = pinctrl_gpio_direction_input,
.direction_output = mtk_gpio_direction_output,
.get = mtk_gpio_get,
- .set = mtk_gpio_set,
+ .set_rv = mtk_gpio_set,
.to_irq = mtk_gpio_to_irq,
.set_config = mtk_gpio_set_config,
};
@@ -1039,7 +1044,7 @@ static int mtk_eint_init(struct mtk_pinctrl *pctl, struct platform_device *pdev)
pctl->eint->pctl = pctl;
pctl->eint->gpio_xlate = &mtk_eint_xt;
- return mtk_eint_do_init(pctl->eint);
+ return mtk_eint_do_init(pctl->eint, NULL);
}
/* This is used as a common probe function */
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt6893.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt6893.h
new file mode 100644
index 000000000000..0d3bb16411f8
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt6893.h
@@ -0,0 +1,2283 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ * Copyright (C) 2024 Collabora Ltd.
+ *
+ * Author: Andy Teng <andy.teng@mediatek.com>
+ */
+
+#ifndef __PINCTRL_MTK_MT6893_H
+#define __PINCTRL_MTK_MT6893_H
+
+#include "pinctrl-paris.h"
+
+static const struct mtk_pin_desc mtk_pins_mt6893[] = {
+ MTK_PIN(
+ 0, "GPIO0",
+ MTK_EINT_FUNCTION(0, 0),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO0"),
+ MTK_FUNCTION(1, "SPI6_CLK"),
+ MTK_FUNCTION(2, "I2S5_MCK"),
+ MTK_FUNCTION(3, "PWM_0"),
+ MTK_FUNCTION(4, "MD_INT0"),
+ MTK_FUNCTION(5, "TP_GPIO0_AO")
+ ),
+ MTK_PIN(
+ 1, "GPIO1",
+ MTK_EINT_FUNCTION(0, 1),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO1"),
+ MTK_FUNCTION(1, "SPI6_CSB"),
+ MTK_FUNCTION(2, "I2S5_BCK"),
+ MTK_FUNCTION(3, "PWM_1"),
+ MTK_FUNCTION(4, "MD_INT1_C2K_UIM0_HOT_PLUG"),
+ MTK_FUNCTION(5, "TP_GPIO1_AO")
+ ),
+ MTK_PIN(
+ 2, "GPIO2",
+ MTK_EINT_FUNCTION(0, 2),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO2"),
+ MTK_FUNCTION(1, "SPI6_MI"),
+ MTK_FUNCTION(2, "I2S5_LRCK"),
+ MTK_FUNCTION(3, "PWM_2"),
+ MTK_FUNCTION(4, "MD_INT2_C2K_UIM1_HOT_PLUG"),
+ MTK_FUNCTION(5, "TP_GPIO2_AO")
+ ),
+ MTK_PIN(
+ 3, "GPIO3",
+ MTK_EINT_FUNCTION(0, 3),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO3"),
+ MTK_FUNCTION(1, "SPI6_MO"),
+ MTK_FUNCTION(2, "I2S5_DO"),
+ MTK_FUNCTION(3, "PWM_3"),
+ MTK_FUNCTION(4, "CLKM0"),
+ MTK_FUNCTION(5, "TP_GPIO3_AO")
+ ),
+ MTK_PIN(
+ 4, "GPIO4",
+ MTK_EINT_FUNCTION(0, 4),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO4"),
+ MTK_FUNCTION(1, "SPI7_A_CLK"),
+ MTK_FUNCTION(2, "I2S2_MCK"),
+ MTK_FUNCTION(3, "DMIC1_CLK"),
+ MTK_FUNCTION(4, "PCM1_DI"),
+ MTK_FUNCTION(5, "TP_GPIO4_AO")
+ ),
+ MTK_PIN(
+ 5, "GPIO5",
+ MTK_EINT_FUNCTION(0, 5),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO5"),
+ MTK_FUNCTION(1, "SPI7_A_CSB"),
+ MTK_FUNCTION(2, "I2S2_BCK"),
+ MTK_FUNCTION(3, "DMIC1_DAT"),
+ MTK_FUNCTION(4, "PCM1_CLK"),
+ MTK_FUNCTION(5, "TP_GPIO5_AO")
+ ),
+ MTK_PIN(
+ 6, "GPIO6",
+ MTK_EINT_FUNCTION(0, 6),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO6"),
+ MTK_FUNCTION(1, "SPI7_A_MI"),
+ MTK_FUNCTION(2, "I2S2_LRCK"),
+ MTK_FUNCTION(3, "DMIC_CLK"),
+ MTK_FUNCTION(4, "PCM1_SYNC"),
+ MTK_FUNCTION(5, "TP_GPIO6_AO"),
+ MTK_FUNCTION(6, "CONN_TCXOENA_REQ")
+ ),
+ MTK_PIN(
+ 7, "GPIO7",
+ MTK_EINT_FUNCTION(0, 7),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO7"),
+ MTK_FUNCTION(1, "SPI7_A_MO"),
+ MTK_FUNCTION(2, "I2S2_DI"),
+ MTK_FUNCTION(3, "DMIC_DAT"),
+ MTK_FUNCTION(4, "PCM1_DO0"),
+ MTK_FUNCTION(5, "TP_GPIO7_AO"),
+ MTK_FUNCTION(6, "WIFI_TXD")
+ ),
+ MTK_PIN(
+ 8, "GPIO8",
+ MTK_EINT_FUNCTION(0, 8),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO8"),
+ MTK_FUNCTION(1, "SRCLKENAI1"),
+ MTK_FUNCTION(2, "I2S2_DI2"),
+ MTK_FUNCTION(3, "KPCOL2"),
+ MTK_FUNCTION(4, "PCM1_DO1"),
+ MTK_FUNCTION(5, "CLKM1"),
+ MTK_FUNCTION(6, "CONN_BT_TXD")
+ ),
+ MTK_PIN(
+ 9, "GPIO9",
+ MTK_EINT_FUNCTION(0, 9),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO9"),
+ MTK_FUNCTION(1, "SRCLKENAI0"),
+ MTK_FUNCTION(2, "DVFSRC_EXT_REQ"),
+ MTK_FUNCTION(3, "KPROW2"),
+ MTK_FUNCTION(4, "PCM1_DO2"),
+ MTK_FUNCTION(5, "CLKM3"),
+ MTK_FUNCTION(6, "CMMCLK4")
+ ),
+ MTK_PIN(
+ 10, "GPIO10",
+ MTK_EINT_FUNCTION(0, 10),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO10"),
+ MTK_FUNCTION(1, "MSDC1_CLK_A"),
+ MTK_FUNCTION(2, "SPI4_B_CLK"),
+ MTK_FUNCTION(3, "I2S8_MCK"),
+ MTK_FUNCTION(4, "DSI1_TE"),
+ MTK_FUNCTION(5, "MD_INT0"),
+ MTK_FUNCTION(6, "TP_GPIO0_AO")
+ ),
+ MTK_PIN(
+ 11, "GPIO11",
+ MTK_EINT_FUNCTION(0, 11),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO11"),
+ MTK_FUNCTION(1, "MSDC1_CMD_A"),
+ MTK_FUNCTION(2, "SPI4_B_CSB"),
+ MTK_FUNCTION(3, "I2S8_BCK"),
+ MTK_FUNCTION(4, "LCM1_RST"),
+ MTK_FUNCTION(5, "MD_INT1_C2K_UIM0_HOT_PLUG"),
+ MTK_FUNCTION(6, "TP_GPIO1_AO")
+ ),
+ MTK_PIN(
+ 12, "GPIO12",
+ MTK_EINT_FUNCTION(0, 12),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO12"),
+ MTK_FUNCTION(1, "MSDC1_DAT3_A"),
+ MTK_FUNCTION(2, "SPI4_B_MI"),
+ MTK_FUNCTION(3, "I2S8_LRCK"),
+ MTK_FUNCTION(4, "DMIC1_CLK"),
+ MTK_FUNCTION(5, "MD_INT2_C2K_UIM1_HOT_PLUG"),
+ MTK_FUNCTION(6, "TP_GPIO2_AO")
+ ),
+ MTK_PIN(
+ 13, "GPIO13",
+ MTK_EINT_FUNCTION(0, 13),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO13"),
+ MTK_FUNCTION(1, "MSDC1_DAT0_A"),
+ MTK_FUNCTION(2, "SPI4_B_MO"),
+ MTK_FUNCTION(3, "I2S8_DI"),
+ MTK_FUNCTION(4, "DMIC1_DAT"),
+ MTK_FUNCTION(5, "ANT_SEL10"),
+ MTK_FUNCTION(6, "TP_GPIO3_AO")
+ ),
+ MTK_PIN(
+ 14, "GPIO14",
+ MTK_EINT_FUNCTION(0, 14),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO14"),
+ MTK_FUNCTION(1, "MSDC1_DAT2_A"),
+ MTK_FUNCTION(2, "SPI5_C_CLK"),
+ MTK_FUNCTION(3, "I2S9_MCK"),
+ MTK_FUNCTION(4, "IDDIG"),
+ MTK_FUNCTION(5, "ANT_SEL11"),
+ MTK_FUNCTION(6, "TP_GPIO4_AO")
+ ),
+ MTK_PIN(
+ 15, "GPIO15",
+ MTK_EINT_FUNCTION(0, 15),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO15"),
+ MTK_FUNCTION(1, "MSDC1_DAT1_A"),
+ MTK_FUNCTION(2, "SPI5_C_CSB"),
+ MTK_FUNCTION(3, "I2S9_BCK"),
+ MTK_FUNCTION(4, "USB_DRVVBUS"),
+ MTK_FUNCTION(5, "ANT_SEL12"),
+ MTK_FUNCTION(6, "TP_GPIO5_AO")
+ ),
+ MTK_PIN(
+ 16, "GPIO16",
+ MTK_EINT_FUNCTION(0, 16),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO16"),
+ MTK_FUNCTION(1, "SRCLKENAI1"),
+ MTK_FUNCTION(2, "SPI5_C_MI"),
+ MTK_FUNCTION(3, "I2S9_LRCK"),
+ MTK_FUNCTION(4, "KPCOL2"),
+ MTK_FUNCTION(5, "GPS_L1_ELNA_EN"),
+ MTK_FUNCTION(6, "TP_GPIO6_AO"),
+ MTK_FUNCTION(7, "DBG_MON_A30")
+ ),
+ MTK_PIN(
+ 17, "GPIO17",
+ MTK_EINT_FUNCTION(0, 17),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO17"),
+ MTK_FUNCTION(1, "SRCLKENAI0"),
+ MTK_FUNCTION(2, "SPI5_C_MO"),
+ MTK_FUNCTION(3, "I2S9_DO"),
+ MTK_FUNCTION(4, "KPROW2"),
+ MTK_FUNCTION(5, "GPS_L5_ELNA_EN"),
+ MTK_FUNCTION(6, "TP_GPIO7_AO"),
+ MTK_FUNCTION(7, "DBG_MON_A31")
+ ),
+ MTK_PIN(
+ 18, "GPIO18",
+ MTK_EINT_FUNCTION(0, 18),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO18"),
+ MTK_FUNCTION(1, "DP_TX_HPD"),
+ MTK_FUNCTION(2, "SPI4_C_MI"),
+ MTK_FUNCTION(3, "SPI1_B_MI"),
+ MTK_FUNCTION(4, "GPS_L1_ELNA_EN"),
+ MTK_FUNCTION(5, "ANT_SEL10"),
+ MTK_FUNCTION(6, "MD_INT0")
+ ),
+ MTK_PIN(
+ 19, "GPIO19",
+ MTK_EINT_FUNCTION(0, 19),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO19"),
+ MTK_FUNCTION(1, "SRCLKENAI1"),
+ MTK_FUNCTION(2, "SPI4_C_MO"),
+ MTK_FUNCTION(3, "SPI1_B_MO"),
+ MTK_FUNCTION(4, "GPS_L5_ELNA_EN"),
+ MTK_FUNCTION(5, "ANT_SEL11"),
+ MTK_FUNCTION(6, "MD_INT1_C2K_UIM0_HOT_PLUG")
+ ),
+ MTK_PIN(
+ 20, "GPIO20",
+ MTK_EINT_FUNCTION(0, 20),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO20"),
+ MTK_FUNCTION(1, "SRCLKENAI0"),
+ MTK_FUNCTION(2, "SPI4_C_CLK"),
+ MTK_FUNCTION(3, "SPI1_B_CLK"),
+ MTK_FUNCTION(4, "PWM_3"),
+ MTK_FUNCTION(5, "ANT_SEL12"),
+ MTK_FUNCTION(6, "MD_INT2_C2K_UIM1_HOT_PLUG")
+ ),
+ MTK_PIN(
+ 21, "GPIO21",
+ MTK_EINT_FUNCTION(0, 21),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO21"),
+ MTK_FUNCTION(1, "DP_TX_HPD"),
+ MTK_FUNCTION(2, "SPI4_C_CSB"),
+ MTK_FUNCTION(3, "SPI1_B_CSB"),
+ MTK_FUNCTION(4, "I2S7_MCK"),
+ MTK_FUNCTION(5, "I2S9_MCK"),
+ MTK_FUNCTION(6, "IDDIG")
+ ),
+ MTK_PIN(
+ 22, "GPIO22",
+ MTK_EINT_FUNCTION(0, 22),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO22"),
+ MTK_FUNCTION(1, "LCM1_RST"),
+ MTK_FUNCTION(2, "SPI0_C_CLK"),
+ MTK_FUNCTION(3, "SPI7_B_CLK"),
+ MTK_FUNCTION(4, "I2S7_BCK"),
+ MTK_FUNCTION(5, "I2S9_BCK"),
+ MTK_FUNCTION(6, "SCL13")
+ ),
+ MTK_PIN(
+ 23, "GPIO23",
+ MTK_EINT_FUNCTION(0, 23),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO23"),
+ MTK_FUNCTION(1, "DSI1_TE"),
+ MTK_FUNCTION(2, "SPI0_C_CSB"),
+ MTK_FUNCTION(3, "SPI7_B_CSB"),
+ MTK_FUNCTION(4, "I2S7_LRCK"),
+ MTK_FUNCTION(5, "I2S9_LRCK"),
+ MTK_FUNCTION(6, "SDA13")
+ ),
+ MTK_PIN(
+ 24, "GPIO24",
+ MTK_EINT_FUNCTION(0, 24),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO24"),
+ MTK_FUNCTION(1, "SRCLKENAI1"),
+ MTK_FUNCTION(2, "SPI0_C_MI"),
+ MTK_FUNCTION(3, "SPI7_B_MI"),
+ MTK_FUNCTION(4, "I2S6_DI"),
+ MTK_FUNCTION(5, "I2S8_DI"),
+ MTK_FUNCTION(6, "SCL_6306")
+ ),
+ MTK_PIN(
+ 25, "GPIO25",
+ MTK_EINT_FUNCTION(0, 25),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO25"),
+ MTK_FUNCTION(1, "SRCLKENAI0"),
+ MTK_FUNCTION(2, "SPI0_C_MO"),
+ MTK_FUNCTION(3, "SPI7_B_MO"),
+ MTK_FUNCTION(4, "I2S7_DO"),
+ MTK_FUNCTION(5, "I2S9_DO"),
+ MTK_FUNCTION(6, "SDA_6306")
+ ),
+ MTK_PIN(
+ 26, "GPIO26",
+ MTK_EINT_FUNCTION(0, 26),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO26"),
+ MTK_FUNCTION(1, "PWM_2"),
+ MTK_FUNCTION(2, "CLKM0"),
+ MTK_FUNCTION(3, "USB_DRVVBUS")
+ ),
+ MTK_PIN(
+ 27, "GPIO27",
+ MTK_EINT_FUNCTION(0, 27),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO27"),
+ MTK_FUNCTION(1, "PWM_3"),
+ MTK_FUNCTION(2, "CLKM1")
+ ),
+ MTK_PIN(
+ 28, "GPIO28",
+ MTK_EINT_FUNCTION(0, 28),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO28"),
+ MTK_FUNCTION(1, "PWM_0"),
+ MTK_FUNCTION(2, "CLKM2")
+ ),
+ MTK_PIN(
+ 29, "GPIO29",
+ MTK_EINT_FUNCTION(0, 29),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO29"),
+ MTK_FUNCTION(1, "PWM_1"),
+ MTK_FUNCTION(2, "CLKM3"),
+ MTK_FUNCTION(3, "DSI1_TE")
+ ),
+ MTK_PIN(
+ 30, "GPIO30",
+ MTK_EINT_FUNCTION(0, 30),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO30"),
+ MTK_FUNCTION(1, "PWM_2"),
+ MTK_FUNCTION(2, "CLKM0"),
+ MTK_FUNCTION(3, "LCM1_RST")
+ ),
+ MTK_PIN(
+ 31, "GPIO31",
+ MTK_EINT_FUNCTION(0, 31),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO31"),
+ MTK_FUNCTION(1, "I2S3_MCK"),
+ MTK_FUNCTION(2, "I2S1_MCK"),
+ MTK_FUNCTION(3, "I2S5_MCK"),
+ MTK_FUNCTION(4, "SRCLKENAI0"),
+ MTK_FUNCTION(5, "I2S0_MCK")
+ ),
+ MTK_PIN(
+ 32, "GPIO32",
+ MTK_EINT_FUNCTION(0, 32),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO32"),
+ MTK_FUNCTION(1, "I2S3_BCK"),
+ MTK_FUNCTION(2, "I2S1_BCK"),
+ MTK_FUNCTION(3, "I2S5_BCK"),
+ MTK_FUNCTION(4, "PCM0_CLK"),
+ MTK_FUNCTION(5, "I2S0_BCK")
+ ),
+ MTK_PIN(
+ 33, "GPIO33",
+ MTK_EINT_FUNCTION(0, 33),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO33"),
+ MTK_FUNCTION(1, "I2S3_LRCK"),
+ MTK_FUNCTION(2, "I2S1_LRCK"),
+ MTK_FUNCTION(3, "I2S5_LRCK"),
+ MTK_FUNCTION(4, "PCM0_SYNC"),
+ MTK_FUNCTION(5, "I2S0_LRCK")
+ ),
+ MTK_PIN(
+ 34, "GPIO34",
+ MTK_EINT_FUNCTION(0, 34),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO34"),
+ MTK_FUNCTION(1, "I2S0_DI"),
+ MTK_FUNCTION(2, "I2S2_DI"),
+ MTK_FUNCTION(3, "I2S2_DI2"),
+ MTK_FUNCTION(4, "PCM0_DI"),
+ MTK_FUNCTION(5, "I2S0_DI")
+ ),
+ MTK_PIN(
+ 35, "GPIO35",
+ MTK_EINT_FUNCTION(0, 35),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO35"),
+ MTK_FUNCTION(1, "I2S3_DO"),
+ MTK_FUNCTION(2, "I2S1_DO"),
+ MTK_FUNCTION(3, "I2S5_DO"),
+ MTK_FUNCTION(4, "PCM0_DO")
+ ),
+ MTK_PIN(
+ 36, "GPIO36",
+ MTK_EINT_FUNCTION(0, 36),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO36"),
+ MTK_FUNCTION(1, "SPI5_A_CLK"),
+ MTK_FUNCTION(2, "DMIC1_CLK"),
+ MTK_FUNCTION(3, "IDDIG"),
+ MTK_FUNCTION(4, "MD_URXD0"),
+ MTK_FUNCTION(5, "UCTS0"),
+ MTK_FUNCTION(6, "URXD1"),
+ MTK_FUNCTION(7, "DBG_MON_A0")
+ ),
+ MTK_PIN(
+ 37, "GPIO37",
+ MTK_EINT_FUNCTION(0, 37),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO37"),
+ MTK_FUNCTION(1, "SPI5_A_CSB"),
+ MTK_FUNCTION(2, "DMIC1_DAT"),
+ MTK_FUNCTION(3, "USB_DRVVBUS"),
+ MTK_FUNCTION(4, "MD_UTXD0"),
+ MTK_FUNCTION(5, "URTS0"),
+ MTK_FUNCTION(6, "UTXD1"),
+ MTK_FUNCTION(7, "DBG_MON_A1")
+ ),
+ MTK_PIN(
+ 38, "GPIO38",
+ MTK_EINT_FUNCTION(0, 38),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO38"),
+ MTK_FUNCTION(1, "SPI5_A_MI"),
+ MTK_FUNCTION(2, "DMIC_CLK"),
+ MTK_FUNCTION(3, "DSI1_TE"),
+ MTK_FUNCTION(4, "MD_URXD1"),
+ MTK_FUNCTION(5, "URXD0"),
+ MTK_FUNCTION(6, "UCTS1"),
+ MTK_FUNCTION(7, "DBG_MON_A2")
+ ),
+ MTK_PIN(
+ 39, "GPIO39",
+ MTK_EINT_FUNCTION(0, 39),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO39"),
+ MTK_FUNCTION(1, "SPI5_A_MO"),
+ MTK_FUNCTION(2, "DMIC_DAT"),
+ MTK_FUNCTION(3, "LCM1_RST"),
+ MTK_FUNCTION(4, "MD_UTXD1"),
+ MTK_FUNCTION(5, "UTXD0"),
+ MTK_FUNCTION(6, "URTS1"),
+ MTK_FUNCTION(7, "DBG_MON_A3")
+ ),
+ MTK_PIN(
+ 40, "GPIO40",
+ MTK_EINT_FUNCTION(0, 40),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO40"),
+ MTK_FUNCTION(1, "DISP_PWM"),
+ MTK_FUNCTION(7, "DBG_MON_A6")
+ ),
+ MTK_PIN(
+ 41, "GPIO41",
+ MTK_EINT_FUNCTION(0, 41),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO41"),
+ MTK_FUNCTION(1, "DSI_TE")
+ ),
+ MTK_PIN(
+ 42, "GPIO42",
+ MTK_EINT_FUNCTION(0, 42),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO42"),
+ MTK_FUNCTION(1, "LCM_RST")
+ ),
+ MTK_PIN(
+ 43, "GPIO43",
+ MTK_EINT_FUNCTION(0, 43),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO43"),
+ MTK_FUNCTION(1, "MD_INT1_C2K_UIM0_HOT_PLUG"),
+ MTK_FUNCTION(2, "MD_INT2_C2K_UIM1_HOT_PLUG"),
+ MTK_FUNCTION(3, "SCL_6306"),
+ MTK_FUNCTION(4, "ADSP_URXD0"),
+ MTK_FUNCTION(5, "PTA_RXD"),
+ MTK_FUNCTION(6, "SSPM_URXD_AO"),
+ MTK_FUNCTION(7, "DBG_MON_A4")
+ ),
+ MTK_PIN(
+ 44, "GPIO44",
+ MTK_EINT_FUNCTION(0, 44),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO44"),
+ MTK_FUNCTION(1, "MD_INT2_C2K_UIM1_HOT_PLUG"),
+ MTK_FUNCTION(2, "MD_INT1_C2K_UIM0_HOT_PLUG"),
+ MTK_FUNCTION(3, "SDA_6306"),
+ MTK_FUNCTION(4, "ADSP_UTXD0"),
+ MTK_FUNCTION(5, "PTA_TXD"),
+ MTK_FUNCTION(6, "SSPM_UTXD_AO"),
+ MTK_FUNCTION(7, "DBG_MON_A5")
+ ),
+ MTK_PIN(
+ 45, "GPIO45",
+ MTK_EINT_FUNCTION(0, 45),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO45"),
+ MTK_FUNCTION(1, "MD1_SIM2_SCLK"),
+ MTK_FUNCTION(2, "MD1_SIM1_SCLK"),
+ MTK_FUNCTION(3, "MCUPM_JTAG_TDI"),
+ MTK_FUNCTION(4, "APU_JTAG_TDI"),
+ MTK_FUNCTION(5, "CCU_JTAG_TDI"),
+ MTK_FUNCTION(6, "LVTS_SCK")
+ ),
+ MTK_PIN(
+ 46, "GPIO46",
+ MTK_EINT_FUNCTION(0, 46),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO46"),
+ MTK_FUNCTION(1, "MD1_SIM2_SRST"),
+ MTK_FUNCTION(2, "MD1_SIM1_SRST"),
+ MTK_FUNCTION(3, "MCUPM_JTAG_TMS"),
+ MTK_FUNCTION(4, "APU_JTAG_TMS"),
+ MTK_FUNCTION(5, "CCU_JTAG_TMS"),
+ MTK_FUNCTION(6, "LVTS_SDI")
+ ),
+ MTK_PIN(
+ 47, "GPIO47",
+ MTK_EINT_FUNCTION(0, 47),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO47"),
+ MTK_FUNCTION(1, "MD1_SIM2_SIO"),
+ MTK_FUNCTION(2, "MD1_SIM1_SIO"),
+ MTK_FUNCTION(3, "MCUPM_JTAG_TDO"),
+ MTK_FUNCTION(4, "APU_JTAG_TDO"),
+ MTK_FUNCTION(5, "CCU_JTAG_TDO"),
+ MTK_FUNCTION(6, "LVTS_SCF")
+ ),
+ MTK_PIN(
+ 48, "GPIO48",
+ MTK_EINT_FUNCTION(0, 48),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO48"),
+ MTK_FUNCTION(1, "MD1_SIM1_SIO"),
+ MTK_FUNCTION(2, "MD1_SIM2_SIO"),
+ MTK_FUNCTION(3, "MCUPM_JTAG_TRSTN"),
+ MTK_FUNCTION(4, "APU_JTAG_TRST"),
+ MTK_FUNCTION(5, "CCU_JTAG_TRST"),
+ MTK_FUNCTION(6, "LVTS_FOUT")
+ ),
+ MTK_PIN(
+ 49, "GPIO49",
+ MTK_EINT_FUNCTION(0, 49),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO49"),
+ MTK_FUNCTION(1, "MD1_SIM1_SRST"),
+ MTK_FUNCTION(2, "MD1_SIM2_SRST"),
+ MTK_FUNCTION(3, "MCUPM_JTAG_TCK"),
+ MTK_FUNCTION(4, "APU_JTAG_TCK"),
+ MTK_FUNCTION(5, "CCU_JTAG_TCK"),
+ MTK_FUNCTION(6, "LVTS_SDO")
+ ),
+ MTK_PIN(
+ 50, "GPIO50",
+ MTK_EINT_FUNCTION(0, 50),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO50"),
+ MTK_FUNCTION(1, "MD1_SIM1_SCLK"),
+ MTK_FUNCTION(2, "MD1_SIM2_SCLK"),
+ MTK_FUNCTION(6, "LVTS_26M")
+ ),
+ MTK_PIN(
+ 51, "GPIO51",
+ MTK_EINT_FUNCTION(0, 51),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO51"),
+ MTK_FUNCTION(1, "MSDC1_CLK"),
+ MTK_FUNCTION(2, "PCM1_CLK"),
+ MTK_FUNCTION(3, "VPU_UDI_TCK"),
+ MTK_FUNCTION(4, "UDI_TCK"),
+ MTK_FUNCTION(5, "IPU_JTAG_TCK"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TCK"),
+ MTK_FUNCTION(7, "JTCK_SEL3")
+ ),
+ MTK_PIN(
+ 52, "GPIO52",
+ MTK_EINT_FUNCTION(0, 52),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO52"),
+ MTK_FUNCTION(1, "MSDC1_CMD"),
+ MTK_FUNCTION(2, "PCM1_SYNC"),
+ MTK_FUNCTION(3, "VPU_UDI_TMS"),
+ MTK_FUNCTION(4, "UDI_TMS"),
+ MTK_FUNCTION(5, "IPU_JTAG_TMS"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TMS"),
+ MTK_FUNCTION(7, "JTMS_SEL3")
+ ),
+ MTK_PIN(
+ 53, "GPIO53",
+ MTK_EINT_FUNCTION(0, 53),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO53"),
+ MTK_FUNCTION(1, "MSDC1_DAT3"),
+ MTK_FUNCTION(2, "PCM1_DI")
+ ),
+ MTK_PIN(
+ 54, "GPIO54",
+ MTK_EINT_FUNCTION(0, 54),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO54"),
+ MTK_FUNCTION(1, "MSDC1_DAT0"),
+ MTK_FUNCTION(2, "PCM1_DO0"),
+ MTK_FUNCTION(3, "VPU_UDI_TDI"),
+ MTK_FUNCTION(4, "UDI_TDI"),
+ MTK_FUNCTION(5, "IPU_JTAG_TDI"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TDI"),
+ MTK_FUNCTION(7, "JTDI_SEL3")
+ ),
+ MTK_PIN(
+ 55, "GPIO55",
+ MTK_EINT_FUNCTION(0, 55),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO55"),
+ MTK_FUNCTION(1, "MSDC1_DAT2"),
+ MTK_FUNCTION(2, "PCM1_DO2"),
+ MTK_FUNCTION(3, "VPU_UDI_NTRST"),
+ MTK_FUNCTION(4, "UDI_NTRST"),
+ MTK_FUNCTION(5, "IPU_JTAG_TRST"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TRSTN"),
+ MTK_FUNCTION(7, "JTRSTN_SEL3")
+ ),
+ MTK_PIN(
+ 56, "GPIO56",
+ MTK_EINT_FUNCTION(0, 56),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO56"),
+ MTK_FUNCTION(1, "MSDC1_DAT1"),
+ MTK_FUNCTION(2, "PCM1_DO1"),
+ MTK_FUNCTION(3, "VPU_UDI_TDO"),
+ MTK_FUNCTION(4, "UDI_TDO"),
+ MTK_FUNCTION(5, "IPU_JTAG_TDO"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TDO"),
+ MTK_FUNCTION(7, "JTDO_SEL3")
+ ),
+ MTK_PIN(
+ 57, "GPIO57",
+ MTK_EINT_FUNCTION(0, 57),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO57"),
+ MTK_FUNCTION(1, "MIPI2_D_SCLK"),
+ MTK_FUNCTION(7, "DBG_MON_A14")
+ ),
+ MTK_PIN(
+ 58, "GPIO58",
+ MTK_EINT_FUNCTION(0, 58),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO58"),
+ MTK_FUNCTION(1, "MIPI2_D_SDATA"),
+ MTK_FUNCTION(7, "DBG_MON_A15")
+ ),
+ MTK_PIN(
+ 59, "GPIO59",
+ MTK_EINT_FUNCTION(0, 59),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO59"),
+ MTK_FUNCTION(1, "MIPI_M_SCLK"),
+ MTK_FUNCTION(7, "DBG_MON_A17")
+ ),
+ MTK_PIN(
+ 60, "GPIO60",
+ MTK_EINT_FUNCTION(0, 60),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO60"),
+ MTK_FUNCTION(1, "MIPI_M_SDATA"),
+ MTK_FUNCTION(7, "DBG_MON_A18")
+ ),
+ MTK_PIN(
+ 61, "GPIO61",
+ MTK_EINT_FUNCTION(0, 61),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO61"),
+ MTK_FUNCTION(1, "MD_UCNT_A_TGL"),
+ MTK_FUNCTION(7, "DBG_MON_A16")
+ ),
+ MTK_PIN(
+ 62, "GPIO62",
+ MTK_EINT_FUNCTION(0, 62),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO62"),
+ MTK_FUNCTION(1, "DIGRF_IRQ")
+ ),
+ MTK_PIN(
+ 63, "GPIO63",
+ MTK_EINT_FUNCTION(0, 63),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO63"),
+ MTK_FUNCTION(1, "BPI_BUS0"),
+ MTK_FUNCTION(7, "DBG_MON_A19")
+ ),
+ MTK_PIN(
+ 64, "GPIO64",
+ MTK_EINT_FUNCTION(0, 64),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO64"),
+ MTK_FUNCTION(1, "BPI_BUS1"),
+ MTK_FUNCTION(7, "DBG_MON_A20")
+ ),
+ MTK_PIN(
+ 65, "GPIO65",
+ MTK_EINT_FUNCTION(0, 65),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO65"),
+ MTK_FUNCTION(1, "BPI_BUS2"),
+ MTK_FUNCTION(7, "DBG_MON_A21")
+ ),
+ MTK_PIN(
+ 66, "GPIO66",
+ MTK_EINT_FUNCTION(0, 66),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO66"),
+ MTK_FUNCTION(1, "BPI_BUS3"),
+ MTK_FUNCTION(7, "DBG_MON_A22")
+ ),
+ MTK_PIN(
+ 67, "GPIO67",
+ MTK_EINT_FUNCTION(0, 67),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO67"),
+ MTK_FUNCTION(1, "BPI_BUS4")
+ ),
+ MTK_PIN(
+ 68, "GPIO68",
+ MTK_EINT_FUNCTION(0, 68),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO68"),
+ MTK_FUNCTION(1, "BPI_BUS5")
+ ),
+ MTK_PIN(
+ 69, "GPIO69",
+ MTK_EINT_FUNCTION(0, 69),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO69"),
+ MTK_FUNCTION(1, "BPI_BUS6"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS6")
+ ),
+ MTK_PIN(
+ 70, "GPIO70",
+ MTK_EINT_FUNCTION(0, 70),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO70"),
+ MTK_FUNCTION(1, "BPI_BUS7"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS7")
+ ),
+ MTK_PIN(
+ 71, "GPIO71",
+ MTK_EINT_FUNCTION(0, 71),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO71"),
+ MTK_FUNCTION(1, "BPI_BUS8"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS8")
+ ),
+ MTK_PIN(
+ 72, "GPIO72",
+ MTK_EINT_FUNCTION(0, 72),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO72"),
+ MTK_FUNCTION(1, "BPI_BUS9"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS9")
+ ),
+ MTK_PIN(
+ 73, "GPIO73",
+ MTK_EINT_FUNCTION(0, 73),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO73"),
+ MTK_FUNCTION(1, "BPI_BUS10"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS10")
+ ),
+ MTK_PIN(
+ 74, "GPIO74",
+ MTK_EINT_FUNCTION(0, 74),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO74"),
+ MTK_FUNCTION(1, "BPI_BUS11_OLAT0"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS11_OLAT0")
+ ),
+ MTK_PIN(
+ 75, "GPIO75",
+ MTK_EINT_FUNCTION(0, 75),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO75"),
+ MTK_FUNCTION(1, "BPI_BUS12_OLAT1"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS12_OLAT1")
+ ),
+ MTK_PIN(
+ 76, "GPIO76",
+ MTK_EINT_FUNCTION(0, 76),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO76"),
+ MTK_FUNCTION(1, "BPI_BUS13_OLAT2"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS13_OLAT2")
+ ),
+ MTK_PIN(
+ 77, "GPIO77",
+ MTK_EINT_FUNCTION(0, 77),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO77"),
+ MTK_FUNCTION(1, "BPI_BUS14_OLAT3"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS14_OLAT3")
+ ),
+ MTK_PIN(
+ 78, "GPIO78",
+ MTK_EINT_FUNCTION(0, 78),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO78"),
+ MTK_FUNCTION(1, "BPI_BUS15_OLAT4"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS15_OLAT4"),
+ MTK_FUNCTION(7, "DBG_MON_A7")
+ ),
+ MTK_PIN(
+ 79, "GPIO79",
+ MTK_EINT_FUNCTION(0, 79),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO79"),
+ MTK_FUNCTION(1, "BPI_BUS16_OLAT5"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS16_OLAT5"),
+ MTK_FUNCTION(7, "DBG_MON_A8")
+ ),
+ MTK_PIN(
+ 80, "GPIO80",
+ MTK_EINT_FUNCTION(0, 80),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO80"),
+ MTK_FUNCTION(1, "BPI_BUS17_ANT0"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS17_ANT0"),
+ MTK_FUNCTION(7, "DBG_MON_A9")
+ ),
+ MTK_PIN(
+ 81, "GPIO81",
+ MTK_EINT_FUNCTION(0, 81),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO81"),
+ MTK_FUNCTION(1, "BPI_BUS18_ANT1"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS18_ANT1"),
+ MTK_FUNCTION(7, "DBG_MON_A10")
+ ),
+ MTK_PIN(
+ 82, "GPIO82",
+ MTK_EINT_FUNCTION(0, 82),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO82"),
+ MTK_FUNCTION(1, "BPI_BUS19_ANT2"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS19_ANT2"),
+ MTK_FUNCTION(7, "DBG_MON_A11")
+ ),
+ MTK_PIN(
+ 83, "GPIO83",
+ MTK_EINT_FUNCTION(0, 83),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO83"),
+ MTK_FUNCTION(1, "BPI_BUS20_ANT3"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS20_ANT3"),
+ MTK_FUNCTION(7, "DBG_MON_A12")
+ ),
+ MTK_PIN(
+ 84, "GPIO84",
+ MTK_EINT_FUNCTION(0, 84),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO84"),
+ MTK_FUNCTION(1, "BPI_BUS21_ANT4"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS21_ANT4"),
+ MTK_FUNCTION(7, "DBG_MON_A13")
+ ),
+ MTK_PIN(
+ 85, "GPIO85",
+ MTK_EINT_FUNCTION(0, 85),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO85"),
+ MTK_FUNCTION(1, "MIPI1_D_SCLK"),
+ MTK_FUNCTION(2, "CONN_MIPI1_SCLK")
+ ),
+ MTK_PIN(
+ 86, "GPIO86",
+ MTK_EINT_FUNCTION(0, 86),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO86"),
+ MTK_FUNCTION(1, "MIPI1_D_SDATA"),
+ MTK_FUNCTION(2, "CONN_MIPI1_SDATA")
+ ),
+ MTK_PIN(
+ 87, "GPIO87",
+ MTK_EINT_FUNCTION(0, 87),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO87"),
+ MTK_FUNCTION(1, "MIPI0_D_SCLK"),
+ MTK_FUNCTION(2, "CONN_MIPI0_SCLK")
+ ),
+ MTK_PIN(
+ 88, "GPIO88",
+ MTK_EINT_FUNCTION(0, 88),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO88"),
+ MTK_FUNCTION(1, "MIPI0_D_SDATA"),
+ MTK_FUNCTION(2, "CONN_MIPI0_SDATA")
+ ),
+ MTK_PIN(
+ 89, "GPIO89",
+ MTK_EINT_FUNCTION(0, 89),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO89"),
+ MTK_FUNCTION(1, "SPMI_SCL"),
+ MTK_FUNCTION(2, "SCL10")
+ ),
+ MTK_PIN(
+ 90, "GPIO90",
+ MTK_EINT_FUNCTION(0, 90),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO90"),
+ MTK_FUNCTION(1, "SPMI_SDA"),
+ MTK_FUNCTION(2, "SDA10")
+ ),
+ MTK_PIN(
+ 91, "GPIO91",
+ MTK_EINT_FUNCTION(0, 91),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO91"),
+ MTK_FUNCTION(1, "AP_GOOD")
+ ),
+ MTK_PIN(
+ 92, "GPIO92",
+ MTK_EINT_FUNCTION(0, 92),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO92"),
+ MTK_FUNCTION(1, "URXD0"),
+ MTK_FUNCTION(2, "MD_URXD0"),
+ MTK_FUNCTION(3, "MD_URXD1"),
+ MTK_FUNCTION(4, "SSPM_URXD_AO"),
+ MTK_FUNCTION(5, "CONN_BGF_UART0_RXD")
+ ),
+ MTK_PIN(
+ 93, "GPIO93",
+ MTK_EINT_FUNCTION(0, 93),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO93"),
+ MTK_FUNCTION(1, "UTXD0"),
+ MTK_FUNCTION(2, "MD_UTXD0"),
+ MTK_FUNCTION(3, "MD_UTXD1"),
+ MTK_FUNCTION(4, "SSPM_UTXD_AO"),
+ MTK_FUNCTION(5, "CONN_BGF_UART0_TXD"),
+ MTK_FUNCTION(6, "WIFI_TXD")
+ ),
+ MTK_PIN(
+ 94, "GPIO94",
+ MTK_EINT_FUNCTION(0, 94),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO94"),
+ MTK_FUNCTION(1, "URXD1"),
+ MTK_FUNCTION(2, "ADSP_URXD0"),
+ MTK_FUNCTION(3, "MD32_0_RXD"),
+ MTK_FUNCTION(4, "SSPM_URXD_AO"),
+ MTK_FUNCTION(5, "TP_URXD1_AO"),
+ MTK_FUNCTION(6, "TP_URXD2_AO"),
+ MTK_FUNCTION(7, "MBISTREADEN_TRIGGER")
+ ),
+ MTK_PIN(
+ 95, "GPIO95",
+ MTK_EINT_FUNCTION(0, 95),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO95"),
+ MTK_FUNCTION(1, "UTXD1"),
+ MTK_FUNCTION(2, "ADSP_UTXD0"),
+ MTK_FUNCTION(3, "MD32_0_TXD"),
+ MTK_FUNCTION(4, "SSPM_UTXD_AO"),
+ MTK_FUNCTION(5, "TP_UTXD1_AO"),
+ MTK_FUNCTION(6, "TP_UTXD2_AO"),
+ MTK_FUNCTION(7, "MBISTWRITEEN_TRIGGER")
+ ),
+ MTK_PIN(
+ 96, "GPIO96",
+ MTK_EINT_FUNCTION(0, 96),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO96"),
+ MTK_FUNCTION(1, "TDM_LRCK"),
+ MTK_FUNCTION(2, "I2S7_LRCK"),
+ MTK_FUNCTION(3, "I2S9_LRCK"),
+ MTK_FUNCTION(4, "SPI4_A_CLK"),
+ MTK_FUNCTION(5, "ADSP_JTAG0_TDI"),
+ MTK_FUNCTION(6, "CONN_BGF_DSP_L1_JDI"),
+ MTK_FUNCTION(7, "IO_JTAG_TDI")
+ ),
+ MTK_PIN(
+ 97, "GPIO97",
+ MTK_EINT_FUNCTION(0, 97),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO97"),
+ MTK_FUNCTION(1, "TDM_BCK"),
+ MTK_FUNCTION(2, "I2S7_BCK"),
+ MTK_FUNCTION(3, "I2S9_BCK"),
+ MTK_FUNCTION(4, "SPI4_A_CSB"),
+ MTK_FUNCTION(5, "ADSP_JTAG0_TRSTN"),
+ MTK_FUNCTION(6, "CONN_BGF_DSP_L1_JINTP"),
+ MTK_FUNCTION(7, "IO_JTAG_TRSTN")
+ ),
+ MTK_PIN(
+ 98, "GPIO98",
+ MTK_EINT_FUNCTION(0, 98),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO98"),
+ MTK_FUNCTION(1, "TDM_MCK"),
+ MTK_FUNCTION(2, "I2S7_MCK"),
+ MTK_FUNCTION(3, "I2S9_MCK"),
+ MTK_FUNCTION(4, "SPI4_A_MI"),
+ MTK_FUNCTION(5, "ADSP_JTAG0_TCK"),
+ MTK_FUNCTION(6, "CONN_BGF_DSP_L1_JCK"),
+ MTK_FUNCTION(7, "IO_JTAG_TCK")
+ ),
+ MTK_PIN(
+ 99, "GPIO99",
+ MTK_EINT_FUNCTION(0, 99),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO99"),
+ MTK_FUNCTION(1, "TDM_DATA0"),
+ MTK_FUNCTION(2, "I2S6_DI"),
+ MTK_FUNCTION(3, "I2S8_DI"),
+ MTK_FUNCTION(4, "SPI4_A_MO"),
+ MTK_FUNCTION(5, "ADSP_JTAG0_TDO"),
+ MTK_FUNCTION(6, "CONN_BGF_DSP_L1_JDO"),
+ MTK_FUNCTION(7, "IO_JTAG_TDO")
+ ),
+ MTK_PIN(
+ 100, "GPIO100",
+ MTK_EINT_FUNCTION(0, 100),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO100"),
+ MTK_FUNCTION(1, "TDM_DATA1"),
+ MTK_FUNCTION(2, "I2S7_DO"),
+ MTK_FUNCTION(3, "I2S9_DO"),
+ MTK_FUNCTION(4, "DP_TX_HPD"),
+ MTK_FUNCTION(5, "ADSP_JTAG0_TMS"),
+ MTK_FUNCTION(6, "CONN_BGF_DSP_L1_JMS"),
+ MTK_FUNCTION(7, "IO_JTAG_TMS")
+ ),
+ MTK_PIN(
+ 101, "GPIO101",
+ MTK_EINT_FUNCTION(0, 101),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO101"),
+ MTK_FUNCTION(1, "TDM_DATA2"),
+ MTK_FUNCTION(2, "DMIC1_CLK"),
+ MTK_FUNCTION(3, "SRCLKENAI0"),
+ MTK_FUNCTION(4, "SPI5_B_CLK"),
+ MTK_FUNCTION(5, "CLKM0"),
+ MTK_FUNCTION(7, "DAP_MD32_SWD")
+ ),
+ MTK_PIN(
+ 102, "GPIO102",
+ MTK_EINT_FUNCTION(0, 102),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO102"),
+ MTK_FUNCTION(1, "TDM_DATA3"),
+ MTK_FUNCTION(2, "DMIC1_DAT"),
+ MTK_FUNCTION(3, "SRCLKENAI1"),
+ MTK_FUNCTION(4, "SPI5_B_CSB"),
+ MTK_FUNCTION(5, "DP_TX_HPD"),
+ MTK_FUNCTION(6, "DVFSRC_EXT_REQ"),
+ MTK_FUNCTION(7, "DAP_MD32_SWCK")
+ ),
+ MTK_PIN(
+ 103, "GPIO103",
+ MTK_EINT_FUNCTION(0, 103),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO103"),
+ MTK_FUNCTION(1, "SPI0_A_MI"),
+ MTK_FUNCTION(2, "SCP_SPI0_MI"),
+ MTK_FUNCTION(5, "DFD_TDO"),
+ MTK_FUNCTION(6, "SPM_JTAG_TDO"),
+ MTK_FUNCTION(7, "JTDO_SEL1")
+ ),
+ MTK_PIN(
+ 104, "GPIO104",
+ MTK_EINT_FUNCTION(0, 104),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO104"),
+ MTK_FUNCTION(1, "SPI0_A_CSB"),
+ MTK_FUNCTION(2, "SCP_SPI0_CS"),
+ MTK_FUNCTION(5, "DFD_TMS"),
+ MTK_FUNCTION(6, "SPM_JTAG_TMS"),
+ MTK_FUNCTION(7, "JTMS_SEL1")
+ ),
+ MTK_PIN(
+ 105, "GPIO105",
+ MTK_EINT_FUNCTION(0, 105),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO105"),
+ MTK_FUNCTION(1, "SPI0_A_MO"),
+ MTK_FUNCTION(2, "SCP_SPI0_MO"),
+ MTK_FUNCTION(3, "SCP_SDA0"),
+ MTK_FUNCTION(5, "DFD_TDI"),
+ MTK_FUNCTION(6, "SPM_JTAG_TDI"),
+ MTK_FUNCTION(7, "JTDI_SEL1")
+ ),
+ MTK_PIN(
+ 106, "GPIO106",
+ MTK_EINT_FUNCTION(0, 106),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO106"),
+ MTK_FUNCTION(1, "SPI0_A_CLK"),
+ MTK_FUNCTION(2, "SCP_SPI0_CK"),
+ MTK_FUNCTION(3, "SCP_SCL0"),
+ MTK_FUNCTION(5, "DFD_TCK_XI"),
+ MTK_FUNCTION(6, "SPM_JTAG_TCK"),
+ MTK_FUNCTION(7, "JTCK_SEL1")
+ ),
+ MTK_PIN(
+ 107, "GPIO107",
+ MTK_EINT_FUNCTION(0, 107),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO107"),
+ MTK_FUNCTION(1, "DMIC_CLK"),
+ MTK_FUNCTION(2, "PWM_0"),
+ MTK_FUNCTION(3, "CLKM2"),
+ MTK_FUNCTION(4, "SPI5_B_MI"),
+ MTK_FUNCTION(6, "SPM_JTAG_TRSTN"),
+ MTK_FUNCTION(7, "JTRSTN_SEL1")
+ ),
+ MTK_PIN(
+ 108, "GPIO108",
+ MTK_EINT_FUNCTION(0, 108),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO108"),
+ MTK_FUNCTION(1, "DMIC_DAT"),
+ MTK_FUNCTION(2, "PWM_1"),
+ MTK_FUNCTION(3, "CLKM3"),
+ MTK_FUNCTION(4, "SPI5_B_MO"),
+ MTK_FUNCTION(7, "DAP_SONIC_SWD")
+ ),
+ MTK_PIN(
+ 109, "GPIO109",
+ MTK_EINT_FUNCTION(0, 109),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO109"),
+ MTK_FUNCTION(1, "I2S1_MCK"),
+ MTK_FUNCTION(2, "I2S3_MCK"),
+ MTK_FUNCTION(3, "I2S2_MCK"),
+ MTK_FUNCTION(4, "DP_TX_HPD"),
+ MTK_FUNCTION(5, "I2S2_MCK"),
+ MTK_FUNCTION(6, "SRCLKENAI0"),
+ MTK_FUNCTION(7, "DAP_SONIC_SWCK")
+ ),
+ MTK_PIN(
+ 110, "GPIO110",
+ MTK_EINT_FUNCTION(0, 110),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO110"),
+ MTK_FUNCTION(1, "I2S1_BCK"),
+ MTK_FUNCTION(2, "I2S3_BCK"),
+ MTK_FUNCTION(3, "I2S2_BCK"),
+ MTK_FUNCTION(4, "PCM0_CLK"),
+ MTK_FUNCTION(5, "I2S2_BCK"),
+ MTK_FUNCTION(6, "CONN_BGF_MCU_TDO")
+ ),
+ MTK_PIN(
+ 111, "GPIO111",
+ MTK_EINT_FUNCTION(0, 111),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO111"),
+ MTK_FUNCTION(1, "I2S1_LRCK"),
+ MTK_FUNCTION(2, "I2S3_LRCK"),
+ MTK_FUNCTION(3, "I2S2_LRCK"),
+ MTK_FUNCTION(4, "PCM0_SYNC"),
+ MTK_FUNCTION(5, "I2S2_LRCK"),
+ MTK_FUNCTION(6, "CONN_BGF_MCU_TDI")
+ ),
+ MTK_PIN(
+ 112, "GPIO112",
+ MTK_EINT_FUNCTION(0, 112),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO112"),
+ MTK_FUNCTION(1, "I2S2_DI"),
+ MTK_FUNCTION(2, "I2S0_DI"),
+ MTK_FUNCTION(3, "I2S2_DI2"),
+ MTK_FUNCTION(4, "PCM0_DI"),
+ MTK_FUNCTION(5, "I2S2_DI"),
+ MTK_FUNCTION(6, "CONN_BGF_MCU_TMS")
+ ),
+ MTK_PIN(
+ 113, "GPIO113",
+ MTK_EINT_FUNCTION(0, 113),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO113"),
+ MTK_FUNCTION(1, "I2S1_DO"),
+ MTK_FUNCTION(2, "I2S3_DO"),
+ MTK_FUNCTION(3, "I2S5_DO"),
+ MTK_FUNCTION(4, "PCM0_DO"),
+ MTK_FUNCTION(5, "I2S2_DI2"),
+ MTK_FUNCTION(6, "CONN_BGF_MCU_TCK")
+ ),
+ MTK_PIN(
+ 114, "GPIO114",
+ MTK_EINT_FUNCTION(0, 114),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO114"),
+ MTK_FUNCTION(1, "SPI2_MI"),
+ MTK_FUNCTION(2, "SCP_SPI2_MI"),
+ MTK_FUNCTION(6, "CONN_BGF_MCU_TRST_B")
+ ),
+ MTK_PIN(
+ 115, "GPIO115",
+ MTK_EINT_FUNCTION(0, 115),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO115"),
+ MTK_FUNCTION(1, "SPI2_CSB"),
+ MTK_FUNCTION(2, "SCP_SPI2_CS"),
+ MTK_FUNCTION(6, "CONN_BGF_MCU_DBGI_N")
+ ),
+ MTK_PIN(
+ 116, "GPIO116",
+ MTK_EINT_FUNCTION(0, 116),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO116"),
+ MTK_FUNCTION(1, "SPI2_MO"),
+ MTK_FUNCTION(2, "SCP_SPI2_MO"),
+ MTK_FUNCTION(3, "SCP_SDA1"),
+ MTK_FUNCTION(6, "CONN_BGF_MCU_DBGACK_N")
+ ),
+ MTK_PIN(
+ 117, "GPIO117",
+ MTK_EINT_FUNCTION(0, 117),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO117"),
+ MTK_FUNCTION(1, "SPI2_CLK"),
+ MTK_FUNCTION(2, "SCP_SPI2_CK"),
+ MTK_FUNCTION(3, "SCP_SCL1")
+ ),
+ MTK_PIN(
+ 118, "GPIO118",
+ MTK_EINT_FUNCTION(0, 118),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO118"),
+ MTK_FUNCTION(1, "SCL1"),
+ MTK_FUNCTION(2, "SCP_SCL0"),
+ MTK_FUNCTION(3, "SCP_SCL1")
+ ),
+ MTK_PIN(
+ 119, "GPIO119",
+ MTK_EINT_FUNCTION(0, 119),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO119"),
+ MTK_FUNCTION(1, "SDA1"),
+ MTK_FUNCTION(2, "SCP_SDA0"),
+ MTK_FUNCTION(3, "SCP_SDA1")
+ ),
+ MTK_PIN(
+ 120, "GPIO120",
+ MTK_EINT_FUNCTION(0, 120),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO120"),
+ MTK_FUNCTION(1, "SCL9")
+ ),
+ MTK_PIN(
+ 121, "GPIO121",
+ MTK_EINT_FUNCTION(0, 121),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO121"),
+ MTK_FUNCTION(1, "SDA9")
+ ),
+ MTK_PIN(
+ 122, "GPIO122",
+ MTK_EINT_FUNCTION(0, 122),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO122"),
+ MTK_FUNCTION(1, "SCL8")
+ ),
+ MTK_PIN(
+ 123, "GPIO123",
+ MTK_EINT_FUNCTION(0, 123),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO123"),
+ MTK_FUNCTION(1, "SDA8")
+ ),
+ MTK_PIN(
+ 124, "GPIO124",
+ MTK_EINT_FUNCTION(0, 124),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO124"),
+ MTK_FUNCTION(1, "SCL7"),
+ MTK_FUNCTION(2, "DMIC1_CLK")
+ ),
+ MTK_PIN(
+ 125, "GPIO125",
+ MTK_EINT_FUNCTION(0, 125),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO125"),
+ MTK_FUNCTION(1, "SDA7"),
+ MTK_FUNCTION(2, "DMIC1_DAT")
+ ),
+ MTK_PIN(
+ 126, "GPIO126",
+ MTK_EINT_FUNCTION(0, 126),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO126"),
+ MTK_FUNCTION(1, "CMFLASH0"),
+ MTK_FUNCTION(2, "PWM_2"),
+ MTK_FUNCTION(3, "TP_UCTS1_AO"),
+ MTK_FUNCTION(4, "UCTS0"),
+ MTK_FUNCTION(5, "SCL11"),
+ MTK_FUNCTION(6, "MD32_1_GPIO0")
+ ),
+ MTK_PIN(
+ 127, "GPIO127",
+ MTK_EINT_FUNCTION(0, 127),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO127"),
+ MTK_FUNCTION(1, "CMFLASH1"),
+ MTK_FUNCTION(2, "PWM_3"),
+ MTK_FUNCTION(3, "TP_URTS1_AO"),
+ MTK_FUNCTION(4, "URTS0"),
+ MTK_FUNCTION(5, "SDA11"),
+ MTK_FUNCTION(6, "MD32_1_GPIO1")
+ ),
+ MTK_PIN(
+ 128, "GPIO128",
+ MTK_EINT_FUNCTION(0, 128),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO128"),
+ MTK_FUNCTION(1, "CMFLASH2"),
+ MTK_FUNCTION(2, "PWM_0"),
+ MTK_FUNCTION(3, "TP_UCTS2_AO"),
+ MTK_FUNCTION(4, "UCTS1"),
+ MTK_FUNCTION(5, "SCL12"),
+ MTK_FUNCTION(6, "MD32_1_GPIO2")
+ ),
+ MTK_PIN(
+ 129, "GPIO129",
+ MTK_EINT_FUNCTION(0, 129),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO129"),
+ MTK_FUNCTION(1, "CMFLASH3"),
+ MTK_FUNCTION(2, "PWM_1"),
+ MTK_FUNCTION(3, "TP_URTS2_AO"),
+ MTK_FUNCTION(4, "URTS1"),
+ MTK_FUNCTION(5, "SDA12")
+ ),
+ MTK_PIN(
+ 130, "GPIO130",
+ MTK_EINT_FUNCTION(0, 130),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO130"),
+ MTK_FUNCTION(1, "CMVREF0"),
+ MTK_FUNCTION(2, "ANT_SEL10"),
+ MTK_FUNCTION(3, "SCP_JTAG0_TDO"),
+ MTK_FUNCTION(4, "MD32_0_JTAG_TDO"),
+ MTK_FUNCTION(5, "SCL11"),
+ MTK_FUNCTION(6, "CONN_WF_MCU_TDO"),
+ MTK_FUNCTION(7, "DBG_MON_A23")
+ ),
+ MTK_PIN(
+ 131, "GPIO131",
+ MTK_EINT_FUNCTION(0, 131),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO131"),
+ MTK_FUNCTION(1, "CMVREF1"),
+ MTK_FUNCTION(2, "ANT_SEL11"),
+ MTK_FUNCTION(3, "SCP_JTAG0_TDI"),
+ MTK_FUNCTION(4, "MD32_0_JTAG_TDI"),
+ MTK_FUNCTION(5, "SDA11"),
+ MTK_FUNCTION(6, "CONN_WF_MCU_TDI"),
+ MTK_FUNCTION(7, "DBG_MON_A26")
+ ),
+ MTK_PIN(
+ 132, "GPIO132",
+ MTK_EINT_FUNCTION(0, 132),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO132"),
+ MTK_FUNCTION(1, "CMVREF2"),
+ MTK_FUNCTION(2, "ANT_SEL12"),
+ MTK_FUNCTION(3, "SCP_JTAG0_TMS"),
+ MTK_FUNCTION(4, "MD32_0_JTAG_TMS"),
+ MTK_FUNCTION(6, "CONN_WF_MCU_TMS"),
+ MTK_FUNCTION(7, "DBG_MON_A28")
+ ),
+ MTK_PIN(
+ 133, "GPIO133",
+ MTK_EINT_FUNCTION(0, 133),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO133"),
+ MTK_FUNCTION(1, "CMVREF3"),
+ MTK_FUNCTION(2, "GPS_L1_ELNA_EN"),
+ MTK_FUNCTION(3, "SCP_JTAG0_TCK"),
+ MTK_FUNCTION(4, "MD32_0_JTAG_TCK"),
+ MTK_FUNCTION(6, "CONN_WF_MCU_TCK"),
+ MTK_FUNCTION(7, "DBG_MON_A24")
+ ),
+ MTK_PIN(
+ 134, "GPIO134",
+ MTK_EINT_FUNCTION(0, 134),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO134"),
+ MTK_FUNCTION(1, "CMVREF4"),
+ MTK_FUNCTION(2, "GPS_L5_ELNA_EN"),
+ MTK_FUNCTION(3, "SCP_JTAG0_TRSTN"),
+ MTK_FUNCTION(4, "MD32_0_JTAG_TRST"),
+ MTK_FUNCTION(6, "CONN_WF_MCU_TRST_B"),
+ MTK_FUNCTION(7, "DBG_MON_A27")
+ ),
+ MTK_PIN(
+ 135, "GPIO135",
+ MTK_EINT_FUNCTION(0, 135),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO135"),
+ MTK_FUNCTION(1, "PWM_0"),
+ MTK_FUNCTION(2, "SRCLKENAI1"),
+ MTK_FUNCTION(3, "MD_URXD0"),
+ MTK_FUNCTION(4, "MD32_0_RXD"),
+ MTK_FUNCTION(5, "CONN_TCXOENA_REQ"),
+ MTK_FUNCTION(6, "CONN_WF_MCU_DBGI_N"),
+ MTK_FUNCTION(7, "DBG_MON_A29")
+ ),
+ MTK_PIN(
+ 136, "GPIO136",
+ MTK_EINT_FUNCTION(0, 136),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO136"),
+ MTK_FUNCTION(1, "CMMCLK3"),
+ MTK_FUNCTION(2, "CLKM1"),
+ MTK_FUNCTION(3, "MD_UTXD0"),
+ MTK_FUNCTION(4, "MD32_0_TXD"),
+ MTK_FUNCTION(5, "CONN_BT_TXD"),
+ MTK_FUNCTION(6, "CONN_WF_MCU_DBGACK_N"),
+ MTK_FUNCTION(7, "DBG_MON_A25")
+ ),
+ MTK_PIN(
+ 137, "GPIO137",
+ MTK_EINT_FUNCTION(0, 137),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO137"),
+ MTK_FUNCTION(1, "CMMCLK4"),
+ MTK_FUNCTION(2, "CLKM2"),
+ MTK_FUNCTION(3, "MD_URXD1"),
+ MTK_FUNCTION(4, "MD32_1_RXD"),
+ MTK_FUNCTION(5, "ILDO_DOUT0"),
+ MTK_FUNCTION(6, "CONN_BGF_UART0_RXD")
+ ),
+ MTK_PIN(
+ 138, "GPIO138",
+ MTK_EINT_FUNCTION(0, 138),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO138"),
+ MTK_FUNCTION(1, "CMMCLK5"),
+ MTK_FUNCTION(2, "CLKM3"),
+ MTK_FUNCTION(3, "MD_UTXD1"),
+ MTK_FUNCTION(4, "MD32_1_TXD"),
+ MTK_FUNCTION(5, "ILDO_DOUT1"),
+ MTK_FUNCTION(6, "CONN_BGF_UART0_TXD")
+ ),
+ MTK_PIN(
+ 139, "GPIO139",
+ MTK_EINT_FUNCTION(0, 139),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO139"),
+ MTK_FUNCTION(1, "SCL4")
+ ),
+ MTK_PIN(
+ 140, "GPIO140",
+ MTK_EINT_FUNCTION(0, 140),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO140"),
+ MTK_FUNCTION(1, "SDA4")
+ ),
+ MTK_PIN(
+ 141, "GPIO141",
+ MTK_EINT_FUNCTION(0, 141),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO141"),
+ MTK_FUNCTION(1, "SCL2")
+ ),
+ MTK_PIN(
+ 142, "GPIO142",
+ MTK_EINT_FUNCTION(0, 142),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO142"),
+ MTK_FUNCTION(1, "SDA2")
+ ),
+ MTK_PIN(
+ 143, "GPIO143",
+ MTK_EINT_FUNCTION(0, 143),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO143"),
+ MTK_FUNCTION(1, "CMVREF0"),
+ MTK_FUNCTION(2, "SPI3_CLK"),
+ MTK_FUNCTION(3, "ADSP_JTAG1_TDO"),
+ MTK_FUNCTION(4, "SCP_JTAG1_TDO"),
+ MTK_FUNCTION(5, "MD32_1_JTAG_TDO"),
+ MTK_FUNCTION(6, "CONN_BGF_DSP_L5_JDO")
+ ),
+ MTK_PIN(
+ 144, "GPIO144",
+ MTK_EINT_FUNCTION(0, 144),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO144"),
+ MTK_FUNCTION(1, "CMVREF1"),
+ MTK_FUNCTION(2, "SPI3_CSB"),
+ MTK_FUNCTION(3, "ADSP_JTAG1_TDI"),
+ MTK_FUNCTION(4, "SCP_JTAG1_TDI"),
+ MTK_FUNCTION(5, "MD32_1_JTAG_TDI"),
+ MTK_FUNCTION(6, "CONN_BGF_DSP_L5_JDI")
+ ),
+ MTK_PIN(
+ 145, "GPIO145",
+ MTK_EINT_FUNCTION(0, 145),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO145"),
+ MTK_FUNCTION(1, "CMVREF2"),
+ MTK_FUNCTION(2, "SPI3_MI"),
+ MTK_FUNCTION(3, "ADSP_JTAG1_TMS"),
+ MTK_FUNCTION(4, "SCP_JTAG1_TMS"),
+ MTK_FUNCTION(5, "MD32_1_JTAG_TMS"),
+ MTK_FUNCTION(6, "CONN_BGF_DSP_L5_JMS")
+ ),
+ MTK_PIN(
+ 146, "GPIO146",
+ MTK_EINT_FUNCTION(0, 146),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO146"),
+ MTK_FUNCTION(1, "CMVREF3"),
+ MTK_FUNCTION(2, "SPI3_MO"),
+ MTK_FUNCTION(3, "ADSP_JTAG1_TCK"),
+ MTK_FUNCTION(4, "SCP_JTAG1_TCK"),
+ MTK_FUNCTION(5, "MD32_1_JTAG_TCK"),
+ MTK_FUNCTION(6, "CONN_BGF_DSP_L5_JCK")
+ ),
+ MTK_PIN(
+ 147, "GPIO147",
+ MTK_EINT_FUNCTION(0, 147),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO147"),
+ MTK_FUNCTION(1, "CMVREF4"),
+ MTK_FUNCTION(2, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(3, "ADSP_JTAG1_TRSTN"),
+ MTK_FUNCTION(4, "SCP_JTAG1_TRSTN"),
+ MTK_FUNCTION(5, "MD32_1_JTAG_TRST"),
+ MTK_FUNCTION(6, "CONN_BGF_DSP_L5_JINTP")
+ ),
+ MTK_PIN(
+ 148, "GPIO148",
+ MTK_EINT_FUNCTION(0, 148),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO148"),
+ MTK_FUNCTION(1, "PWM_1"),
+ MTK_FUNCTION(2, "AGPS_SYNC"),
+ MTK_FUNCTION(3, "CMMCLK5"),
+ MTK_FUNCTION(6, "CONN_WF_MCU_AICE_TMSC")
+ ),
+ MTK_PIN(
+ 149, "GPIO149",
+ MTK_EINT_FUNCTION(0, 149),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO149"),
+ MTK_FUNCTION(1, "CMMCLK0"),
+ MTK_FUNCTION(2, "CLKM0"),
+ MTK_FUNCTION(3, "MD32_0_GPIO0"),
+ MTK_FUNCTION(6, "CONN_WF_MCU_AICE_TCKC")
+ ),
+ MTK_PIN(
+ 150, "GPIO150",
+ MTK_EINT_FUNCTION(0, 150),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO150"),
+ MTK_FUNCTION(1, "CMMCLK1"),
+ MTK_FUNCTION(2, "CLKM1"),
+ MTK_FUNCTION(3, "MD32_0_GPIO1"),
+ MTK_FUNCTION(6, "CONN_BGF_MCU_AICE_TMSC")
+ ),
+ MTK_PIN(
+ 151, "GPIO151",
+ MTK_EINT_FUNCTION(0, 151),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO151"),
+ MTK_FUNCTION(1, "CMMCLK2"),
+ MTK_FUNCTION(2, "CLKM2"),
+ MTK_FUNCTION(3, "MD32_0_GPIO2"),
+ MTK_FUNCTION(6, "CONN_BGF_MCU_AICE_TCKC")
+ ),
+ MTK_PIN(
+ 152, "GPIO152",
+ MTK_EINT_FUNCTION(0, 152),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO152"),
+ MTK_FUNCTION(1, "KPROW1"),
+ MTK_FUNCTION(2, "PWM_2"),
+ MTK_FUNCTION(3, "IDDIG"),
+ MTK_FUNCTION(4, "DP_TX_HPD"),
+ MTK_FUNCTION(5, "DSI1_TE"),
+ MTK_FUNCTION(6, "MBISTREADEN_TRIGGER"),
+ MTK_FUNCTION(7, "DBG_MON_B2")
+ ),
+ MTK_PIN(
+ 153, "GPIO153",
+ MTK_EINT_FUNCTION(0, 153),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO153"),
+ MTK_FUNCTION(1, "KPROW0"),
+ MTK_FUNCTION(7, "DBG_MON_B1")
+ ),
+ MTK_PIN(
+ 154, "GPIO154",
+ MTK_EINT_FUNCTION(0, 154),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO154"),
+ MTK_FUNCTION(1, "KPCOL0"),
+ MTK_FUNCTION(7, "DBG_MON_A32")
+ ),
+ MTK_PIN(
+ 155, "GPIO155",
+ MTK_EINT_FUNCTION(0, 155),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO155"),
+ MTK_FUNCTION(1, "KPCOL1"),
+ MTK_FUNCTION(2, "PWM_3"),
+ MTK_FUNCTION(3, "USB_DRVVBUS"),
+ MTK_FUNCTION(4, "CONN_TCXOENA_REQ"),
+ MTK_FUNCTION(5, "LCM1_RST"),
+ MTK_FUNCTION(6, "MBISTWRITEEN_TRIGGER"),
+ MTK_FUNCTION(7, "DBG_MON_B0")
+ ),
+ MTK_PIN(
+ 156, "GPIO156",
+ MTK_EINT_FUNCTION(0, 156),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO156"),
+ MTK_FUNCTION(1, "SPI1_A_CLK"),
+ MTK_FUNCTION(2, "SCP_SPI1_A_CK"),
+ MTK_FUNCTION(3, "MRG_CLK"),
+ MTK_FUNCTION(4, "AGPS_SYNC"),
+ MTK_FUNCTION(5, "SCL12"),
+ MTK_FUNCTION(7, "DBG_MON_B3")
+ ),
+ MTK_PIN(
+ 157, "GPIO157",
+ MTK_EINT_FUNCTION(0, 157),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO157"),
+ MTK_FUNCTION(1, "SPI1_A_CSB"),
+ MTK_FUNCTION(2, "SCP_SPI1_A_CS"),
+ MTK_FUNCTION(3, "MRG_SYNC"),
+ MTK_FUNCTION(4, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(5, "SDA12"),
+ MTK_FUNCTION(7, "DBG_MON_B4")
+ ),
+ MTK_PIN(
+ 158, "GPIO158",
+ MTK_EINT_FUNCTION(0, 158),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO158"),
+ MTK_FUNCTION(1, "SPI1_A_MI"),
+ MTK_FUNCTION(2, "SCP_SPI1_A_MI"),
+ MTK_FUNCTION(3, "MRG_DI"),
+ MTK_FUNCTION(4, "PTA_RXD"),
+ MTK_FUNCTION(5, "SCL13"),
+ MTK_FUNCTION(7, "DBG_MON_B5")
+ ),
+ MTK_PIN(
+ 159, "GPIO159",
+ MTK_EINT_FUNCTION(0, 159),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO159"),
+ MTK_FUNCTION(1, "SPI1_A_MO"),
+ MTK_FUNCTION(2, "SCP_SPI1_A_MO"),
+ MTK_FUNCTION(3, "MRG_DO"),
+ MTK_FUNCTION(4, "PTA_TXD"),
+ MTK_FUNCTION(5, "SDA13"),
+ MTK_FUNCTION(7, "DBG_MON_B6")
+ ),
+ MTK_PIN(
+ 160, "GPIO160",
+ MTK_EINT_FUNCTION(0, 160),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO160"),
+ MTK_FUNCTION(1, "SCL3"),
+ MTK_FUNCTION(2, "SCP_SCL0"),
+ MTK_FUNCTION(3, "SCP_SCL1")
+ ),
+ MTK_PIN(
+ 161, "GPIO161",
+ MTK_EINT_FUNCTION(0, 161),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO161"),
+ MTK_FUNCTION(1, "SDA3"),
+ MTK_FUNCTION(2, "SCP_SDA0"),
+ MTK_FUNCTION(3, "SCP_SDA1")
+ ),
+ MTK_PIN(
+ 162, "GPIO162",
+ MTK_EINT_FUNCTION(0, 162),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO162"),
+ MTK_FUNCTION(1, "ANT_SEL0"),
+ MTK_FUNCTION(2, "GPS_L1_ELNA_EN"),
+ MTK_FUNCTION(7, "DBG_MON_B7")
+ ),
+ MTK_PIN(
+ 163, "GPIO163",
+ MTK_EINT_FUNCTION(0, 163),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO163"),
+ MTK_FUNCTION(1, "ANT_SEL1"),
+ MTK_FUNCTION(2, "GPS_L5_ELNA_EN"),
+ MTK_FUNCTION(7, "DBG_MON_B8")
+ ),
+ MTK_PIN(
+ 164, "GPIO164",
+ MTK_EINT_FUNCTION(0, 164),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO164"),
+ MTK_FUNCTION(1, "ANT_SEL2"),
+ MTK_FUNCTION(2, "SCP_SPI1_B_CK"),
+ MTK_FUNCTION(3, "TP_URXD1_AO"),
+ MTK_FUNCTION(5, "UCTS0"),
+ MTK_FUNCTION(7, "DBG_MON_B9")
+ ),
+ MTK_PIN(
+ 165, "GPIO165",
+ MTK_EINT_FUNCTION(0, 165),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO165"),
+ MTK_FUNCTION(1, "ANT_SEL3"),
+ MTK_FUNCTION(2, "SCP_SPI1_B_CS"),
+ MTK_FUNCTION(3, "TP_UTXD1_AO"),
+ MTK_FUNCTION(4, "CONN_TCXOENA_REQ"),
+ MTK_FUNCTION(5, "URTS0"),
+ MTK_FUNCTION(7, "DBG_MON_B10")
+ ),
+ MTK_PIN(
+ 166, "GPIO166",
+ MTK_EINT_FUNCTION(0, 166),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO166"),
+ MTK_FUNCTION(1, "ANT_SEL4"),
+ MTK_FUNCTION(2, "SCP_SPI1_B_MI"),
+ MTK_FUNCTION(3, "TP_URXD2_AO"),
+ MTK_FUNCTION(4, "SRCLKENAI1"),
+ MTK_FUNCTION(5, "UCTS1"),
+ MTK_FUNCTION(7, "DBG_MON_B11")
+ ),
+ MTK_PIN(
+ 167, "GPIO167",
+ MTK_EINT_FUNCTION(0, 167),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO167"),
+ MTK_FUNCTION(1, "ANT_SEL5"),
+ MTK_FUNCTION(2, "SCP_SPI1_B_MO"),
+ MTK_FUNCTION(3, "TP_UTXD2_AO"),
+ MTK_FUNCTION(4, "SRCLKENAI0"),
+ MTK_FUNCTION(5, "URTS1"),
+ MTK_FUNCTION(7, "DBG_MON_B12")
+ ),
+ MTK_PIN(
+ 168, "GPIO168",
+ MTK_EINT_FUNCTION(0, 168),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO168"),
+ MTK_FUNCTION(1, "ANT_SEL6"),
+ MTK_FUNCTION(2, "SPI0_B_CLK"),
+ MTK_FUNCTION(3, "TP_UCTS1_AO"),
+ MTK_FUNCTION(4, "KPCOL2"),
+ MTK_FUNCTION(5, "MD_UCTS0"),
+ MTK_FUNCTION(6, "SCL12"),
+ MTK_FUNCTION(7, "DBG_MON_B13")
+ ),
+ MTK_PIN(
+ 169, "GPIO169",
+ MTK_EINT_FUNCTION(0, 169),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO169"),
+ MTK_FUNCTION(1, "ANT_SEL7"),
+ MTK_FUNCTION(2, "SPI0_B_CSB"),
+ MTK_FUNCTION(3, "TP_URTS1_AO"),
+ MTK_FUNCTION(4, "KPROW2"),
+ MTK_FUNCTION(5, "MD_URTS0"),
+ MTK_FUNCTION(6, "SDA12"),
+ MTK_FUNCTION(7, "DBG_MON_B14")
+ ),
+ MTK_PIN(
+ 170, "GPIO170",
+ MTK_EINT_FUNCTION(0, 170),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO170"),
+ MTK_FUNCTION(1, "ANT_SEL8"),
+ MTK_FUNCTION(2, "SPI0_B_MI"),
+ MTK_FUNCTION(3, "TP_UCTS2_AO"),
+ MTK_FUNCTION(4, "SRCLKENAI1"),
+ MTK_FUNCTION(5, "MD_UCTS1"),
+ MTK_FUNCTION(6, "SCL13")
+ ),
+ MTK_PIN(
+ 171, "GPIO171",
+ MTK_EINT_FUNCTION(0, 171),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO171"),
+ MTK_FUNCTION(1, "ANT_SEL9"),
+ MTK_FUNCTION(2, "SPI0_B_MO"),
+ MTK_FUNCTION(3, "TP_URTS2_AO"),
+ MTK_FUNCTION(4, "SRCLKENAI0"),
+ MTK_FUNCTION(5, "MD_URTS1"),
+ MTK_FUNCTION(6, "SDA13")
+ ),
+ MTK_PIN(
+ 172, "GPIO172",
+ MTK_EINT_FUNCTION(0, 172),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO172"),
+ MTK_FUNCTION(1, "CONN_TOP_CLK"),
+ MTK_FUNCTION(2, "AUXIF_CLK0"),
+ MTK_FUNCTION(7, "DBG_MON_B18")
+ ),
+ MTK_PIN(
+ 173, "GPIO173",
+ MTK_EINT_FUNCTION(0, 173),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO173"),
+ MTK_FUNCTION(1, "CONN_TOP_DATA"),
+ MTK_FUNCTION(2, "AUXIF_ST0"),
+ MTK_FUNCTION(7, "DBG_MON_B19")
+ ),
+ MTK_PIN(
+ 174, "GPIO174",
+ MTK_EINT_FUNCTION(0, 174),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO174"),
+ MTK_FUNCTION(1, "CONN_HRST_B"),
+ MTK_FUNCTION(7, "DBG_MON_B17")
+ ),
+ MTK_PIN(
+ 175, "GPIO175",
+ MTK_EINT_FUNCTION(0, 175),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO175"),
+ MTK_FUNCTION(1, "CONN_WB_PTA"),
+ MTK_FUNCTION(7, "DBG_MON_B20")
+ ),
+ MTK_PIN(
+ 176, "GPIO176",
+ MTK_EINT_FUNCTION(0, 176),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO176"),
+ MTK_FUNCTION(1, "CONN_BT_CLK"),
+ MTK_FUNCTION(2, "AUXIF_CLK1"),
+ MTK_FUNCTION(7, "DBG_MON_B15")
+ ),
+ MTK_PIN(
+ 177, "GPIO177",
+ MTK_EINT_FUNCTION(0, 177),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO177"),
+ MTK_FUNCTION(1, "CONN_BT_DATA"),
+ MTK_FUNCTION(2, "AUXIF_ST1"),
+ MTK_FUNCTION(7, "DBG_MON_B16")
+ ),
+ MTK_PIN(
+ 178, "GPIO178",
+ MTK_EINT_FUNCTION(0, 178),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO178"),
+ MTK_FUNCTION(1, "CONN_WF_CTRL0"),
+ MTK_FUNCTION(7, "DBG_MON_B21")
+ ),
+ MTK_PIN(
+ 179, "GPIO179",
+ MTK_EINT_FUNCTION(0, 179),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO179"),
+ MTK_FUNCTION(1, "CONN_WF_CTRL1"),
+ MTK_FUNCTION(2, "UFS_MPHY_SCL"),
+ MTK_FUNCTION(7, "DBG_MON_B22")
+ ),
+ MTK_PIN(
+ 180, "GPIO180",
+ MTK_EINT_FUNCTION(0, 180),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO180"),
+ MTK_FUNCTION(1, "CONN_WF_CTRL2"),
+ MTK_FUNCTION(2, "UFS_MPHY_SDA"),
+ MTK_FUNCTION(7, "DBG_MON_B23")
+ ),
+ MTK_PIN(
+ 181, "GPIO181",
+ MTK_EINT_FUNCTION(0, 181),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO181"),
+ MTK_FUNCTION(1, "CONN_WF_CTRL3"),
+ MTK_FUNCTION(2, "UFS_UNIPRO_SDA")
+ ),
+ MTK_PIN(
+ 182, "GPIO182",
+ MTK_EINT_FUNCTION(0, 182),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO182"),
+ MTK_FUNCTION(1, "CONN_WF_CTRL4"),
+ MTK_FUNCTION(2, "UFS_UNIPRO_SCL")
+ ),
+ MTK_PIN(
+ 183, "GPIO183",
+ MTK_EINT_FUNCTION(0, 183),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO183"),
+ MTK_FUNCTION(1, "MSDC0_CMD")
+ ),
+ MTK_PIN(
+ 184, "GPIO184",
+ MTK_EINT_FUNCTION(0, 184),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO184"),
+ MTK_FUNCTION(1, "MSDC0_DAT0")
+ ),
+ MTK_PIN(
+ 185, "GPIO185",
+ MTK_EINT_FUNCTION(0, 185),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO185"),
+ MTK_FUNCTION(1, "MSDC0_DAT2")
+ ),
+ MTK_PIN(
+ 186, "GPIO186",
+ MTK_EINT_FUNCTION(0, 186),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO186"),
+ MTK_FUNCTION(1, "MSDC0_DAT4")
+ ),
+ MTK_PIN(
+ 187, "GPIO187",
+ MTK_EINT_FUNCTION(0, 187),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO187"),
+ MTK_FUNCTION(1, "MSDC0_DAT6")
+ ),
+ MTK_PIN(
+ 188, "GPIO188",
+ MTK_EINT_FUNCTION(0, 188),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO188"),
+ MTK_FUNCTION(1, "MSDC0_DAT1")
+ ),
+ MTK_PIN(
+ 189, "GPIO189",
+ MTK_EINT_FUNCTION(0, 189),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO189"),
+ MTK_FUNCTION(1, "MSDC0_DAT5")
+ ),
+ MTK_PIN(
+ 190, "GPIO190",
+ MTK_EINT_FUNCTION(0, 190),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO190"),
+ MTK_FUNCTION(1, "MSDC0_DAT7")
+ ),
+ MTK_PIN(
+ 191, "GPIO191",
+ MTK_EINT_FUNCTION(0, 191),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO191"),
+ MTK_FUNCTION(1, "MSDC0_DSL"),
+ MTK_FUNCTION(2, "GPS_L1_ELNA_EN"),
+ MTK_FUNCTION(3, "IDDIG"),
+ MTK_FUNCTION(4, "DMIC_CLK"),
+ MTK_FUNCTION(5, "DSI1_TE")
+ ),
+ MTK_PIN(
+ 192, "GPIO192",
+ MTK_EINT_FUNCTION(0, 192),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO192"),
+ MTK_FUNCTION(1, "MSDC0_CLK"),
+ MTK_FUNCTION(2, "GPS_L5_ELNA_EN"),
+ MTK_FUNCTION(3, "USB_DRVVBUS"),
+ MTK_FUNCTION(4, "DMIC_DAT"),
+ MTK_FUNCTION(5, "LCM1_RST")
+ ),
+ MTK_PIN(
+ 193, "GPIO193",
+ MTK_EINT_FUNCTION(0, 193),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO193"),
+ MTK_FUNCTION(1, "MSDC0_DAT3")
+ ),
+ MTK_PIN(
+ 194, "GPIO194",
+ MTK_EINT_FUNCTION(0, 194),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO194"),
+ MTK_FUNCTION(1, "MSDC0_RSTB")
+ ),
+ MTK_PIN(
+ 195, "GPIO195",
+ MTK_EINT_FUNCTION(0, 195),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO195"),
+ MTK_FUNCTION(1, "SCP_VREQ_VAO"),
+ MTK_FUNCTION(2, "DVFSRC_EXT_REQ")
+ ),
+ MTK_PIN(
+ 196, "GPIO196",
+ MTK_EINT_FUNCTION(0, 196),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO196"),
+ MTK_FUNCTION(1, "AUD_DAT_MOSI2"),
+ MTK_FUNCTION(7, "DBG_MON_B27")
+ ),
+ MTK_PIN(
+ 197, "GPIO197",
+ MTK_EINT_FUNCTION(0, 197),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO197"),
+ MTK_FUNCTION(1, "AUD_NLE_MOSI1"),
+ MTK_FUNCTION(2, "AUD_CLK_MISO"),
+ MTK_FUNCTION(3, "I2S2_MCK"),
+ MTK_FUNCTION(4, "I2S6_MCK"),
+ MTK_FUNCTION(5, "I2S8_MCK"),
+ MTK_FUNCTION(6, "UFS_UNIPRO_SDA"),
+ MTK_FUNCTION(7, "DBG_MON_B28")
+ ),
+ MTK_PIN(
+ 198, "GPIO198",
+ MTK_EINT_FUNCTION(0, 198),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO198"),
+ MTK_FUNCTION(1, "AUD_NLE_MOSI0"),
+ MTK_FUNCTION(2, "AUD_SYNC_MISO"),
+ MTK_FUNCTION(3, "I2S2_BCK"),
+ MTK_FUNCTION(4, "I2S6_BCK"),
+ MTK_FUNCTION(5, "I2S8_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_B29")
+ ),
+ MTK_PIN(
+ 199, "GPIO199",
+ MTK_EINT_FUNCTION(0, 199),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO199"),
+ MTK_FUNCTION(1, "AUD_DAT_MISO2"),
+ MTK_FUNCTION(3, "I2S2_DI2"),
+ MTK_FUNCTION(7, "DBG_MON_B32")
+ ),
+ MTK_PIN(
+ 200, "GPIO200",
+ MTK_EINT_FUNCTION(0, 200),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO200"),
+ MTK_FUNCTION(1, "SCL6"),
+ MTK_FUNCTION(2, "SCP_SCL0"),
+ MTK_FUNCTION(3, "SCP_SCL1"),
+ MTK_FUNCTION(4, "SCL_6306")
+ ),
+ MTK_PIN(
+ 201, "GPIO201",
+ MTK_EINT_FUNCTION(0, 201),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO201"),
+ MTK_FUNCTION(1, "SDA6"),
+ MTK_FUNCTION(2, "SCP_SDA0"),
+ MTK_FUNCTION(3, "SCP_SDA1"),
+ MTK_FUNCTION(4, "SDA_6306")
+ ),
+ MTK_PIN(
+ 202, "GPIO202",
+ MTK_EINT_FUNCTION(0, 202),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO202"),
+ MTK_FUNCTION(1, "SCL5")
+ ),
+ MTK_PIN(
+ 203, "GPIO203",
+ MTK_EINT_FUNCTION(0, 203),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO203"),
+ MTK_FUNCTION(1, "SDA5")
+ ),
+ MTK_PIN(
+ 204, "GPIO204",
+ MTK_EINT_FUNCTION(0, 204),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO204"),
+ MTK_FUNCTION(1, "SCL0"),
+ MTK_FUNCTION(2, "SPI4_C_CLK"),
+ MTK_FUNCTION(3, "SPI7_B_CLK")
+ ),
+ MTK_PIN(
+ 205, "GPIO205",
+ MTK_EINT_FUNCTION(0, 205),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO205"),
+ MTK_FUNCTION(1, "SDA0"),
+ MTK_FUNCTION(2, "SPI4_C_CSB"),
+ MTK_FUNCTION(3, "SPI7_B_CSB")
+ ),
+ MTK_PIN(
+ 206, "GPIO206",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO206"),
+ MTK_FUNCTION(1, "SRCLKENA0")
+ ),
+ MTK_PIN(
+ 207, "GPIO207",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO207"),
+ MTK_FUNCTION(1, "SRCLKENA1")
+ ),
+ MTK_PIN(
+ 208, "GPIO208",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO208"),
+ MTK_FUNCTION(1, "WATCHDOG")
+ ),
+ MTK_PIN(
+ 209, "GPIO209",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO209"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_MI"),
+ MTK_FUNCTION(2, "PWRAP_SPI0_MO")
+ ),
+ MTK_PIN(
+ 210, "GPIO210",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO210"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_CSN")
+ ),
+ MTK_PIN(
+ 211, "GPIO211",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO211"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_MO"),
+ MTK_FUNCTION(2, "PWRAP_SPI0_MI")
+ ),
+ MTK_PIN(
+ 212, "GPIO212",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO212"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_CK")
+ ),
+ MTK_PIN(
+ 213, "GPIO213",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO213"),
+ MTK_FUNCTION(1, "RTC32K_CK")
+ ),
+ MTK_PIN(
+ 214, "GPIO214",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO214"),
+ MTK_FUNCTION(1, "AUD_CLK_MOSI"),
+ MTK_FUNCTION(3, "I2S1_MCK"),
+ MTK_FUNCTION(4, "I2S7_MCK"),
+ MTK_FUNCTION(5, "I2S9_MCK"),
+ MTK_FUNCTION(6, "UFS_UNIPRO_SCL")
+ ),
+ MTK_PIN(
+ 215, "GPIO215",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO215"),
+ MTK_FUNCTION(1, "AUD_SYNC_MOSI"),
+ MTK_FUNCTION(3, "I2S1_BCK"),
+ MTK_FUNCTION(4, "I2S7_BCK"),
+ MTK_FUNCTION(5, "I2S9_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_B24")
+ ),
+ MTK_PIN(
+ 216, "GPIO216",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO216"),
+ MTK_FUNCTION(1, "AUD_DAT_MOSI0"),
+ MTK_FUNCTION(3, "I2S1_LRCK"),
+ MTK_FUNCTION(4, "I2S7_LRCK"),
+ MTK_FUNCTION(5, "I2S9_LRCK"),
+ MTK_FUNCTION(7, "DBG_MON_B25")
+ ),
+ MTK_PIN(
+ 217, "GPIO217",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO217"),
+ MTK_FUNCTION(1, "AUD_DAT_MOSI1"),
+ MTK_FUNCTION(3, "I2S1_DO"),
+ MTK_FUNCTION(4, "I2S7_DO"),
+ MTK_FUNCTION(5, "I2S9_DO"),
+ MTK_FUNCTION(6, "UFS_MPHY_SDA"),
+ MTK_FUNCTION(7, "DBG_MON_B26")
+ ),
+ MTK_PIN(
+ 218, "GPIO218",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO218"),
+ MTK_FUNCTION(1, "AUD_DAT_MISO0"),
+ MTK_FUNCTION(2, "VOW_DAT_MISO"),
+ MTK_FUNCTION(3, "I2S2_LRCK"),
+ MTK_FUNCTION(4, "I2S6_LRCK"),
+ MTK_FUNCTION(5, "I2S8_LRCK"),
+ MTK_FUNCTION(7, "DBG_MON_B30")
+ ),
+ MTK_PIN(
+ 219, "GPIO219",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO219"),
+ MTK_FUNCTION(1, "AUD_DAT_MISO1"),
+ MTK_FUNCTION(2, "VOW_CLK_MISO"),
+ MTK_FUNCTION(3, "I2S2_DI"),
+ MTK_FUNCTION(4, "I2S6_DI"),
+ MTK_FUNCTION(5, "I2S8_DI"),
+ MTK_FUNCTION(6, "UFS_MPHY_SCL"),
+ MTK_FUNCTION(7, "DBG_MON_B31")
+ ),
+ MTK_PIN(
+ 220, "GPIO220",
+ MTK_EINT_FUNCTION(0, 216),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 221, "GPIO221",
+ MTK_EINT_FUNCTION(0, 217),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 222, "GPIO222",
+ MTK_EINT_FUNCTION(0, 218),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 223, "GPIO223",
+ MTK_EINT_FUNCTION(0, 219),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 224, "GPIO224",
+ MTK_EINT_FUNCTION(0, 220),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 225, "GPIO225",
+ MTK_EINT_FUNCTION(0, 222),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 226, "GPIO226",
+ MTK_EINT_FUNCTION(0, 223),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+};
+
+#endif /* __PINCTRL_MTK_MT6893_H */
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt8196.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8196.h
new file mode 100644
index 000000000000..c2a7e239a234
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8196.h
@@ -0,0 +1,3085 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025 MediaTek Inc.
+ * Author: Guodong Liu <Guodong.Liu@mediatek.com>
+ */
+
+#ifndef __PINCTRL_MTK_MT8196_H
+#define __PINCTRL_MTK_MT8196_H
+
+#include "pinctrl-paris.h"
+#define EINT_INVALID_BASE 0xff
+
+static const struct mtk_pin_desc mtk_pins_mt8196[] = {
+ MTK_PIN(
+ 0, "GPIO0",
+ MTK_EINT_FUNCTION(0, 0),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO0"),
+ MTK_FUNCTION(1, "DMIC1_CLK"),
+ MTK_FUNCTION(3, "SPI3_A_MO"),
+ MTK_FUNCTION(4, "FMI2S_B_LRCK"),
+ MTK_FUNCTION(5, "SCP_DMIC1_CLK"),
+ MTK_FUNCTION(6, "TP_GPIO14_AO")
+ ),
+ MTK_PIN(
+ 1, "GPIO1",
+ MTK_EINT_FUNCTION(0, 1),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO1"),
+ MTK_FUNCTION(1, "DMIC1_DAT"),
+ MTK_FUNCTION(2, "SRCLKENAI1"),
+ MTK_FUNCTION(3, "SPI3_A_MI"),
+ MTK_FUNCTION(4, "FMI2S_B_DI"),
+ MTK_FUNCTION(5, "SCP_DMIC1_DAT"),
+ MTK_FUNCTION(6, "TP_GPIO15_AO")
+ ),
+ MTK_PIN(
+ 2, "GPIO2",
+ MTK_EINT_FUNCTION(0, 2),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO2"),
+ MTK_FUNCTION(1, "PWM_VLP"),
+ MTK_FUNCTION(2, "DSI_HSYNC"),
+ MTK_FUNCTION(5, "RG_TSFDC_LDO_EN"),
+ MTK_FUNCTION(6, "TP_GPIO8_AO")
+ ),
+ MTK_PIN(
+ 3, "GPIO3",
+ MTK_EINT_FUNCTION(0, 3),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO3"),
+ MTK_FUNCTION(1, "MD_INT0"),
+ MTK_FUNCTION(2, "DSI1_HSYNC"),
+ MTK_FUNCTION(5, "DA_TSFDC_LDO_MODE"),
+ MTK_FUNCTION(6, "TP_GPIO9_AO")
+ ),
+ MTK_PIN(
+ 4, "GPIO4",
+ MTK_EINT_FUNCTION(0, 4),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO4"),
+ MTK_FUNCTION(1, "DISP_PWM1"),
+ MTK_FUNCTION(2, "MD32_0_GPIO0")
+ ),
+ MTK_PIN(
+ 5, "GPIO5",
+ MTK_EINT_FUNCTION(0, 5),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO5"),
+ MTK_FUNCTION(1, "LCM1_RST"),
+ MTK_FUNCTION(2, "SPI7_A_CLK")
+ ),
+ MTK_PIN(
+ 6, "GPIO6",
+ MTK_EINT_FUNCTION(0, 6),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO6"),
+ MTK_FUNCTION(1, "DSI1_TE"),
+ MTK_FUNCTION(2, "SPI7_A_CSB")
+ ),
+ MTK_PIN(
+ 7, "GPIO7",
+ MTK_EINT_FUNCTION(0, 7),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO7"),
+ MTK_FUNCTION(2, "SPI7_A_MO"),
+ MTK_FUNCTION(3, "GPS_PPS0")
+ ),
+ MTK_PIN(
+ 8, "GPIO8",
+ MTK_EINT_FUNCTION(0, 8),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO8"),
+ MTK_FUNCTION(2, "SPI7_A_MI"),
+ MTK_FUNCTION(3, "EDP_TX_HPD")
+ ),
+ MTK_PIN(
+ 9, "GPIO9",
+ MTK_EINT_FUNCTION(0, 9),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO9"),
+ MTK_FUNCTION(3, "I2SIN1_LRCK"),
+ MTK_FUNCTION(7, "RG_TSFDC_LDO_REFSEL0")
+ ),
+ MTK_PIN(
+ 10, "GPIO10",
+ MTK_EINT_FUNCTION(0, 10),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO10"),
+ MTK_FUNCTION(3, "I2SOUT1_DO"),
+ MTK_FUNCTION(7, "RG_TSFDC_LDO_REFSEL1")
+ ),
+ MTK_PIN(
+ 11, "GPIO11",
+ MTK_EINT_FUNCTION(0, 11),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO11"),
+ MTK_FUNCTION(4, "FMI2S_B_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_A30")
+ ),
+ MTK_PIN(
+ 12, "GPIO12",
+ MTK_EINT_FUNCTION(0, 12),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO12"),
+ MTK_FUNCTION(3, "I2SIN1_DI_B")
+ ),
+ MTK_PIN(
+ 13, "GPIO13",
+ MTK_EINT_FUNCTION(0, 13),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO13"),
+ MTK_FUNCTION(1, "EDP_TX_HPD"),
+ MTK_FUNCTION(2, "GPS_PPS1")
+ ),
+ MTK_PIN(
+ 14, "GPIO14",
+ MTK_EINT_FUNCTION(0, 14),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO14"),
+ MTK_FUNCTION(1, "SRCLKENA2"),
+ MTK_FUNCTION(2, "DSI2_TE"),
+ MTK_FUNCTION(3, "SPMI_P_TRIG_FLAG"),
+ MTK_FUNCTION(5, "MD_INT3"),
+ MTK_FUNCTION(6, "TP_GPIO8_AO")
+ ),
+ MTK_PIN(
+ 15, "GPIO15",
+ MTK_EINT_FUNCTION(0, 15),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO15"),
+ MTK_FUNCTION(1, "SRCLKENAI0"),
+ MTK_FUNCTION(2, "SPMI_M_TRIG_FLAG"),
+ MTK_FUNCTION(3, "UCTS0"),
+ MTK_FUNCTION(4, "MD_INT4"),
+ MTK_FUNCTION(5, "I2SOUT2_DO"),
+ MTK_FUNCTION(6, "TP_GPIO9_AO")
+ ),
+ MTK_PIN(
+ 16, "GPIO16",
+ MTK_EINT_FUNCTION(0, 16),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO16"),
+ MTK_FUNCTION(1, "SRCLKENAI1"),
+ MTK_FUNCTION(2, "DP_TX_HPD"),
+ MTK_FUNCTION(3, "URTS0"),
+ MTK_FUNCTION(4, "GPS_L5_ELNA_EN"),
+ MTK_FUNCTION(5, "KPROW2"),
+ MTK_FUNCTION(6, "TP_GPIO10_AO")
+ ),
+ MTK_PIN(
+ 17, "GPIO17",
+ MTK_EINT_FUNCTION(0, 17),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO17"),
+ MTK_FUNCTION(1, "MD_INT0"),
+ MTK_FUNCTION(2, "DP_OC_EN"),
+ MTK_FUNCTION(3, "UCTS1"),
+ MTK_FUNCTION(4, "MD_NTN_URXD1"),
+ MTK_FUNCTION(5, "KPCOL2"),
+ MTK_FUNCTION(6, "TP_GPIO11_AO")
+ ),
+ MTK_PIN(
+ 18, "GPIO18",
+ MTK_EINT_FUNCTION(0, 18),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO18"),
+ MTK_FUNCTION(1, "DMIC1_CLK"),
+ MTK_FUNCTION(2, "DP_RAUX_SBU1"),
+ MTK_FUNCTION(3, "URTS1"),
+ MTK_FUNCTION(4, "MD_NTN_UTXD1"),
+ MTK_FUNCTION(5, "I2SIN2_DI"),
+ MTK_FUNCTION(6, "TP_UTXD_GNSS_VLP")
+ ),
+ MTK_PIN(
+ 19, "GPIO19",
+ MTK_EINT_FUNCTION(0, 19),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO19"),
+ MTK_FUNCTION(1, "DMIC1_DAT"),
+ MTK_FUNCTION(2, "DP_RAUX_SBU2"),
+ MTK_FUNCTION(3, "CONN_TCXOENA_REQ"),
+ MTK_FUNCTION(4, "CLKM3_A"),
+ MTK_FUNCTION(5, "I2SIN2_BCK"),
+ MTK_FUNCTION(6, "TP_URXD_GNSS_VLP")
+ ),
+ MTK_PIN(
+ 20, "GPIO20",
+ MTK_EINT_FUNCTION(0, 20),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO20"),
+ MTK_FUNCTION(1, "IDDIG"),
+ MTK_FUNCTION(2, "LCM2_RST"),
+ MTK_FUNCTION(3, "GPS_PPS1"),
+ MTK_FUNCTION(4, "CLKM2_A")
+ ),
+ MTK_PIN(
+ 21, "GPIO21",
+ MTK_EINT_FUNCTION(0, 21),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO21"),
+ MTK_FUNCTION(1, "BPI_BUS11"),
+ MTK_FUNCTION(2, "PCIE_PERSTN_1P"),
+ MTK_FUNCTION(3, "DSI1_TE"),
+ MTK_FUNCTION(4, "DMIC_CLK"),
+ MTK_FUNCTION(5, "SCP_DMIC_CLK")
+ ),
+ MTK_PIN(
+ 22, "GPIO22",
+ MTK_EINT_FUNCTION(0, 22),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO22"),
+ MTK_FUNCTION(1, "BPI_BUS12"),
+ MTK_FUNCTION(2, "PCIE_CLKREQN_1P"),
+ MTK_FUNCTION(3, "DSI2_TE"),
+ MTK_FUNCTION(4, "DMIC_DAT"),
+ MTK_FUNCTION(5, "SCP_DMIC_DAT")
+ ),
+ MTK_PIN(
+ 23, "GPIO23",
+ MTK_EINT_FUNCTION(0, 23),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO23"),
+ MTK_FUNCTION(1, "BPI_BUS13"),
+ MTK_FUNCTION(2, "PCIE_WAKEN_1P"),
+ MTK_FUNCTION(3, "DSI3_TE"),
+ MTK_FUNCTION(4, "DMIC1_CLK"),
+ MTK_FUNCTION(5, "SCP_DMIC1_CLK")
+ ),
+ MTK_PIN(
+ 24, "GPIO24",
+ MTK_EINT_FUNCTION(0, 24),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO24"),
+ MTK_FUNCTION(1, "BPI_BUS14"),
+ MTK_FUNCTION(2, "LCM1_RST"),
+ MTK_FUNCTION(3, "AGPS_SYNC"),
+ MTK_FUNCTION(4, "DMIC1_DAT"),
+ MTK_FUNCTION(5, "SCP_DMIC1_DAT"),
+ MTK_FUNCTION(6, "DISP_PWM1")
+ ),
+ MTK_PIN(
+ 25, "GPIO25",
+ MTK_EINT_FUNCTION(0, 25),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO25"),
+ MTK_FUNCTION(1, "BPI_BUS15"),
+ MTK_FUNCTION(2, "LCM2_RST"),
+ MTK_FUNCTION(3, "SRCLKENAI1"),
+ MTK_FUNCTION(4, "DMIC2_CLK"),
+ MTK_FUNCTION(6, "DISP_PWM2")
+ ),
+ MTK_PIN(
+ 26, "GPIO26",
+ MTK_EINT_FUNCTION(0, 26),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO26"),
+ MTK_FUNCTION(1, "BPI_BUS16"),
+ MTK_FUNCTION(2, "LCM3_RST"),
+ MTK_FUNCTION(4, "DMIC2_DAT"),
+ MTK_FUNCTION(6, "DISP_PWM3")
+ ),
+ MTK_PIN(
+ 27, "GPIO27",
+ MTK_EINT_FUNCTION(0, 27),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO27"),
+ MTK_FUNCTION(1, "BPI_BUS17"),
+ MTK_FUNCTION(2, "UTXD4"),
+ MTK_FUNCTION(6, "DISP_PWM4"),
+ MTK_FUNCTION(7, "DBG_MON_A20")
+ ),
+ MTK_PIN(
+ 28, "GPIO28",
+ MTK_EINT_FUNCTION(0, 28),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO28"),
+ MTK_FUNCTION(1, "BPI_BUS18"),
+ MTK_FUNCTION(2, "URXD4"),
+ MTK_FUNCTION(3, "SPI2_A_MI"),
+ MTK_FUNCTION(4, "CLKM0_A"),
+ MTK_FUNCTION(7, "DBG_MON_A21")
+ ),
+ MTK_PIN(
+ 29, "GPIO29",
+ MTK_EINT_FUNCTION(0, 29),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO29"),
+ MTK_FUNCTION(1, "BPI_BUS19"),
+ MTK_FUNCTION(2, "MD_NTN_UTXD1"),
+ MTK_FUNCTION(3, "SPI2_A_MO"),
+ MTK_FUNCTION(4, "CLKM1_A"),
+ MTK_FUNCTION(6, "UCTS4"),
+ MTK_FUNCTION(7, "DBG_MON_A17")
+ ),
+ MTK_PIN(
+ 30, "GPIO30",
+ MTK_EINT_FUNCTION(0, 30),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO30"),
+ MTK_FUNCTION(1, "BPI_BUS20"),
+ MTK_FUNCTION(2, "MD_NTN_URXD1"),
+ MTK_FUNCTION(3, "SPI2_A_CLK"),
+ MTK_FUNCTION(4, "CLKM2_A"),
+ MTK_FUNCTION(5, "DSI3_HSYNC"),
+ MTK_FUNCTION(6, "URTS4"),
+ MTK_FUNCTION(7, "DBG_MON_A18")
+ ),
+ MTK_PIN(
+ 31, "GPIO31",
+ MTK_EINT_FUNCTION(0, 31),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO31"),
+ MTK_FUNCTION(1, "BPI_BUS21"),
+ MTK_FUNCTION(3, "SPI2_A_CSB"),
+ MTK_FUNCTION(4, "CLKM3_A"),
+ MTK_FUNCTION(6, "EDP_TX_HPD"),
+ MTK_FUNCTION(7, "DBG_MON_A19")
+ ),
+ MTK_PIN(
+ 32, "GPIO32",
+ MTK_EINT_FUNCTION(0, 32),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO32"),
+ MTK_FUNCTION(1, "LCM4_RST"),
+ MTK_FUNCTION(2, "DP_TX_HPD"),
+ MTK_FUNCTION(3, "SSPM_JTAG_TCK_VLP"),
+ MTK_FUNCTION(4, "ADSP_JTAG0_TCK"),
+ MTK_FUNCTION(5, "SCP_JTAG0_TCK_VLP"),
+ MTK_FUNCTION(6, "SPU0_TCK"),
+ MTK_FUNCTION(7, "IO_JTAG_TCK")
+ ),
+ MTK_PIN(
+ 33, "GPIO33",
+ MTK_EINT_FUNCTION(0, 33),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO33"),
+ MTK_FUNCTION(1, "DSI4_TE"),
+ MTK_FUNCTION(2, "DP_OC_EN"),
+ MTK_FUNCTION(3, "SSPM_JTAG_TRSTN_VLP"),
+ MTK_FUNCTION(4, "ADSP_JTAG0_TRSTN"),
+ MTK_FUNCTION(5, "SCP_JTAG0_TRSTN_VLP"),
+ MTK_FUNCTION(6, "SPU0_NTRST"),
+ MTK_FUNCTION(7, "IO_JTAG_TRSTN")
+ ),
+ MTK_PIN(
+ 34, "GPIO34",
+ MTK_EINT_FUNCTION(0, 34),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO34"),
+ MTK_FUNCTION(1, "UCTS5"),
+ MTK_FUNCTION(2, "DP_RAUX_SBU1"),
+ MTK_FUNCTION(3, "SSPM_JTAG_TDI_VLP"),
+ MTK_FUNCTION(4, "ADSP_JTAG0_TDI"),
+ MTK_FUNCTION(5, "SCP_JTAG0_TDI_VLP"),
+ MTK_FUNCTION(6, "SPU0_TDI"),
+ MTK_FUNCTION(7, "IO_JTAG_TDI")
+ ),
+ MTK_PIN(
+ 35, "GPIO35",
+ MTK_EINT_FUNCTION(0, 35),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO35"),
+ MTK_FUNCTION(1, "URTS5"),
+ MTK_FUNCTION(2, "DP_RAUX_SBU2"),
+ MTK_FUNCTION(3, "SSPM_JTAG_TDO_VLP"),
+ MTK_FUNCTION(4, "ADSP_JTAG0_TDO"),
+ MTK_FUNCTION(5, "SCP_JTAG0_TDO_VLP"),
+ MTK_FUNCTION(6, "SPU0_TDO"),
+ MTK_FUNCTION(7, "IO_JTAG_TDO")
+ ),
+ MTK_PIN(
+ 36, "GPIO36",
+ MTK_EINT_FUNCTION(0, 36),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO36"),
+ MTK_FUNCTION(1, "UTXD5"),
+ MTK_FUNCTION(3, "SSPM_JTAG_TMS_VLP"),
+ MTK_FUNCTION(4, "ADSP_JTAG0_TMS"),
+ MTK_FUNCTION(5, "SCP_JTAG0_TMS_VLP"),
+ MTK_FUNCTION(6, "SPU0_TMS"),
+ MTK_FUNCTION(7, "IO_JTAG_TMS")
+ ),
+ MTK_PIN(
+ 37, "GPIO37",
+ MTK_EINT_FUNCTION(0, 37),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO37"),
+ MTK_FUNCTION(1, "URXD5"),
+ MTK_FUNCTION(3, "MD_INT3"),
+ MTK_FUNCTION(4, "CLKM0_B"),
+ MTK_FUNCTION(5, "TP_GPIO5_AO"),
+ MTK_FUNCTION(6, "SPU0_UTX"),
+ MTK_FUNCTION(7, "DAP_MD32_SWCK")
+ ),
+ MTK_PIN(
+ 38, "GPIO38",
+ MTK_EINT_FUNCTION(0, 38),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO38"),
+ MTK_FUNCTION(2, "SPMI_P_TRIG_FLAG"),
+ MTK_FUNCTION(3, "MD_INT4"),
+ MTK_FUNCTION(4, "CLKM1_B"),
+ MTK_FUNCTION(5, "TP_GPIO6_AO"),
+ MTK_FUNCTION(6, "SPU0_URX"),
+ MTK_FUNCTION(7, "DAP_MD32_SWD")
+ ),
+ MTK_PIN(
+ 39, "GPIO39",
+ MTK_EINT_FUNCTION(0, 39),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO39"),
+ MTK_FUNCTION(1, "I2S_MCK0"),
+ MTK_FUNCTION(3, "GPS_PPS0"),
+ MTK_FUNCTION(4, "CONN_TCXOENA_REQ"),
+ MTK_FUNCTION(7, "DBG_MON_B12")
+ ),
+ MTK_PIN(
+ 40, "GPIO40",
+ MTK_EINT_FUNCTION(0, 40),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO40"),
+ MTK_FUNCTION(1, "I2SIN6_0_BCK"),
+ MTK_FUNCTION(3, "SPI4_B_CLK"),
+ MTK_FUNCTION(4, "UCTS2"),
+ MTK_FUNCTION(5, "CCU1_UTXD"),
+ MTK_FUNCTION(7, "DBG_MON_B13")
+ ),
+ MTK_PIN(
+ 41, "GPIO41",
+ MTK_EINT_FUNCTION(0, 41),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO41"),
+ MTK_FUNCTION(1, "I2SIN6_0_LRCK"),
+ MTK_FUNCTION(3, "SPI4_B_CSB"),
+ MTK_FUNCTION(4, "URTS2"),
+ MTK_FUNCTION(5, "CCU1_URXD"),
+ MTK_FUNCTION(7, "DBG_MON_B14")
+ ),
+ MTK_PIN(
+ 42, "GPIO42",
+ MTK_EINT_FUNCTION(0, 42),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO42"),
+ MTK_FUNCTION(1, "I2SIN6_0_DI"),
+ MTK_FUNCTION(3, "SPI4_B_MI"),
+ MTK_FUNCTION(4, "URXD2"),
+ MTK_FUNCTION(5, "CCU1_URTS"),
+ MTK_FUNCTION(6, "MD32_0_RXD"),
+ MTK_FUNCTION(7, "DBG_MON_B15")
+ ),
+ MTK_PIN(
+ 43, "GPIO43",
+ MTK_EINT_FUNCTION(0, 43),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO43"),
+ MTK_FUNCTION(1, "I2SOUT6_0_DO"),
+ MTK_FUNCTION(3, "SPI4_B_MO"),
+ MTK_FUNCTION(4, "UTXD2"),
+ MTK_FUNCTION(5, "CCU1_UCTS"),
+ MTK_FUNCTION(6, "MD32_0_TXD"),
+ MTK_FUNCTION(7, "DBG_MON_B16")
+ ),
+ MTK_PIN(
+ 44, "GPIO44",
+ MTK_EINT_FUNCTION(0, 44),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO44"),
+ MTK_FUNCTION(1, "MD_INT1_C2K_UIM0_HOT_PLUG"),
+ MTK_FUNCTION(3, "SPI3_A_CLK"),
+ MTK_FUNCTION(6, "TP_GPIO10_AO")
+ ),
+ MTK_PIN(
+ 45, "GPIO45",
+ MTK_EINT_FUNCTION(0, 45),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO45"),
+ MTK_FUNCTION(1, "MD_INT2_C2K_UIM1_HOT_PLUG"),
+ MTK_FUNCTION(2, "DSI2_HSYNC"),
+ MTK_FUNCTION(3, "SPI3_A_CSB"),
+ MTK_FUNCTION(4, "PWM_VLP"),
+ MTK_FUNCTION(6, "TP_GPIO11_AO")
+ ),
+ MTK_PIN(
+ 46, "GPIO46",
+ MTK_EINT_FUNCTION(0, 46),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO46"),
+ MTK_FUNCTION(1, "SCP_SCL4"),
+ MTK_FUNCTION(2, "PWM_VLP"),
+ MTK_FUNCTION(4, "SCP_ILDO_DTEST1_VLP"),
+ MTK_FUNCTION(5, "UFS_MPHY_SCL"),
+ MTK_FUNCTION(6, "TP_GPIO0_AO")
+ ),
+ MTK_PIN(
+ 47, "GPIO47",
+ MTK_EINT_FUNCTION(0, 47),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO47"),
+ MTK_FUNCTION(1, "SCP_SDA4"),
+ MTK_FUNCTION(4, "SCP_ILDO_DTEST2_VLP"),
+ MTK_FUNCTION(5, "UFS_MPHY_SDA"),
+ MTK_FUNCTION(6, "TP_GPIO1_AO")
+ ),
+ MTK_PIN(
+ 48, "GPIO48",
+ MTK_EINT_FUNCTION(0, 48),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO48"),
+ MTK_FUNCTION(1, "SCP_SCL5"),
+ MTK_FUNCTION(2, "PWM_VLP"),
+ MTK_FUNCTION(3, "CCU0_UTXD"),
+ MTK_FUNCTION(4, "SCP_ILDO_DTEST3_VLP"),
+ MTK_FUNCTION(6, "TP_GPIO2_AO")
+ ),
+ MTK_PIN(
+ 49, "GPIO49",
+ MTK_EINT_FUNCTION(0, 49),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO49"),
+ MTK_FUNCTION(1, "SCP_SDA5"),
+ MTK_FUNCTION(3, "CCU0_URXD"),
+ MTK_FUNCTION(4, "SCP_ILDO_DTEST4_VLP"),
+ MTK_FUNCTION(6, "TP_GPIO3_AO")
+ ),
+ MTK_PIN(
+ 50, "GPIO50",
+ MTK_EINT_FUNCTION(0, 50),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO50"),
+ MTK_FUNCTION(1, "SCP_SCL6"),
+ MTK_FUNCTION(2, "PWM_VLP"),
+ MTK_FUNCTION(3, "CCU0_URTS"),
+ MTK_FUNCTION(4, "DSI_HSYNC"),
+ MTK_FUNCTION(6, "TP_GPIO4_AO")
+ ),
+ MTK_PIN(
+ 51, "GPIO51",
+ MTK_EINT_FUNCTION(0, 51),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO51"),
+ MTK_FUNCTION(1, "SCP_SDA6"),
+ MTK_FUNCTION(3, "CCU0_UCTS"),
+ MTK_FUNCTION(4, "DSI1_HSYNC"),
+ MTK_FUNCTION(6, "TP_GPIO5_AO")
+ ),
+ MTK_PIN(
+ 52, "GPIO52",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO52"),
+ MTK_FUNCTION(1, "SCP_SCL1"),
+ MTK_FUNCTION(3, "TDM_DATA2")
+ ),
+ MTK_PIN(
+ 53, "GPIO53",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO53"),
+ MTK_FUNCTION(1, "SCP_SDA1"),
+ MTK_FUNCTION(3, "TDM_DATA3")
+ ),
+ MTK_PIN(
+ 54, "GPIO54",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO54"),
+ MTK_FUNCTION(1, "AUD_CLK_MOSI"),
+ MTK_FUNCTION(3, "TDM_MCK")
+ ),
+ MTK_PIN(
+ 55, "GPIO55",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO55"),
+ MTK_FUNCTION(1, "AUD_CLK_MISO"),
+ MTK_FUNCTION(2, "I2SOUT2_BCK"),
+ MTK_FUNCTION(3, "TDM_BCK")
+ ),
+ MTK_PIN(
+ 56, "GPIO56",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO56"),
+ MTK_FUNCTION(1, "AUD_DAT_MOSI0"),
+ MTK_FUNCTION(2, "I2SOUT2_LRCK"),
+ MTK_FUNCTION(3, "TDM_LRCK")
+ ),
+ MTK_PIN(
+ 57, "GPIO57",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO57"),
+ MTK_FUNCTION(1, "AUD_DAT_MOSI1"),
+ MTK_FUNCTION(2, "I2SOUT2_DO"),
+ MTK_FUNCTION(3, "TDM_DATA0")
+ ),
+ MTK_PIN(
+ 58, "GPIO58",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO58"),
+ MTK_FUNCTION(1, "AUD_DAT_MISO0"),
+ MTK_FUNCTION(3, "TDM_DATA1")
+ ),
+ MTK_PIN(
+ 59, "GPIO59",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO59"),
+ MTK_FUNCTION(1, "AUD_DAT_MISO1"),
+ MTK_FUNCTION(3, "I2SIN1_BCK")
+ ),
+ MTK_PIN(
+ 60, "GPIO60",
+ MTK_EINT_FUNCTION(0, 60),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO60"),
+ MTK_FUNCTION(1, "KPCOL0"),
+ MTK_FUNCTION(6, "TP_GPIO13_AO")
+ ),
+ MTK_PIN(
+ 61, "GPIO61",
+ MTK_EINT_FUNCTION(0, 61),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO61"),
+ MTK_FUNCTION(1, "MCU_M_PMIC_POC_I")
+ ),
+ MTK_PIN(
+ 62, "GPIO62",
+ MTK_EINT_FUNCTION(0, 62),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO62"),
+ MTK_FUNCTION(1, "MCU_B_PMIC_POC_I")
+ ),
+ MTK_PIN(
+ 63, "GPIO63",
+ MTK_EINT_FUNCTION(0, 63),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO63"),
+ MTK_FUNCTION(1, "MFG_PMIC_POC_I")
+ ),
+ MTK_PIN(
+ 64, "GPIO64",
+ MTK_EINT_FUNCTION(0, 64),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO64"),
+ MTK_FUNCTION(1, "PRE_UVLO")
+ ),
+ MTK_PIN(
+ 65, "GPIO65",
+ MTK_EINT_FUNCTION(0, 65),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO65"),
+ MTK_FUNCTION(1, "DPM2PMIC"),
+ MTK_FUNCTION(2, "SRCLKENA1")
+ ),
+ MTK_PIN(
+ 66, "GPIO66",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO66"),
+ MTK_FUNCTION(1, "WATCHDOG")
+ ),
+ MTK_PIN(
+ 67, "GPIO67",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO67"),
+ MTK_FUNCTION(1, "SRCLKENA0")
+ ),
+ MTK_PIN(
+ 68, "GPIO68",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO68"),
+ MTK_FUNCTION(1, "SCP_VREQ_VAO")
+ ),
+ MTK_PIN(
+ 69, "GPIO69",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO69"),
+ MTK_FUNCTION(1, "RTC32K_CK")
+ ),
+ MTK_PIN(
+ 70, "GPIO70",
+ MTK_EINT_FUNCTION(0, 70),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO70"),
+ MTK_FUNCTION(1, "CMFLASH0")
+ ),
+ MTK_PIN(
+ 71, "GPIO71",
+ MTK_EINT_FUNCTION(0, 71),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO71")
+ ),
+ MTK_PIN(
+ 72, "GPIO72",
+ MTK_EINT_FUNCTION(0, 72),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO72")
+ ),
+ MTK_PIN(
+ 73, "GPIO73",
+ MTK_EINT_FUNCTION(0, 73),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO73")
+ ),
+ MTK_PIN(
+ 74, "GPIO74",
+ MTK_EINT_FUNCTION(0, 74),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO74"),
+ MTK_FUNCTION(1, "DCXO_FPM_LPM")
+ ),
+ MTK_PIN(
+ 75, "GPIO75",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO75"),
+ MTK_FUNCTION(1, "SPMI_M_SCL")
+ ),
+ MTK_PIN(
+ 76, "GPIO76",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO76"),
+ MTK_FUNCTION(1, "SPMI_M_SDA")
+ ),
+ MTK_PIN(
+ 77, "GPIO77",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO77"),
+ MTK_FUNCTION(1, "SPMI_P_SCL")
+ ),
+ MTK_PIN(
+ 78, "GPIO78",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO78"),
+ MTK_FUNCTION(1, "SPMI_P_SDA")
+ ),
+ MTK_PIN(
+ 79, "GPIO79",
+ MTK_EINT_FUNCTION(0, 79),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO79"),
+ MTK_FUNCTION(1, "CMMCLK0"),
+ MTK_FUNCTION(2, "MD_INT4")
+ ),
+ MTK_PIN(
+ 80, "GPIO80",
+ MTK_EINT_FUNCTION(0, 80),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO80"),
+ MTK_FUNCTION(1, "CMMCLK1")
+ ),
+ MTK_PIN(
+ 81, "GPIO81",
+ MTK_EINT_FUNCTION(0, 81),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO81"),
+ MTK_FUNCTION(1, "SCP_SPI0_CK"),
+ MTK_FUNCTION(2, "SPI6_B_CLK"),
+ MTK_FUNCTION(3, "PWM_VLP"),
+ MTK_FUNCTION(4, "I2SOUT5_BCK"),
+ MTK_FUNCTION(6, "TP_GPIO0_AO")
+ ),
+ MTK_PIN(
+ 82, "GPIO82",
+ MTK_EINT_FUNCTION(0, 82),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO82"),
+ MTK_FUNCTION(1, "SCP_SPI0_CS"),
+ MTK_FUNCTION(2, "SPI6_B_CSB"),
+ MTK_FUNCTION(4, "I2SOUT5_LRCK"),
+ MTK_FUNCTION(6, "TP_GPIO1_AO")
+ ),
+ MTK_PIN(
+ 83, "GPIO83",
+ MTK_EINT_FUNCTION(0, 83),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO83"),
+ MTK_FUNCTION(1, "SCP_SPI0_MO"),
+ MTK_FUNCTION(2, "SPI6_B_MO"),
+ MTK_FUNCTION(4, "I2SOUT5_DATA0"),
+ MTK_FUNCTION(6, "TP_GPIO2_AO")
+ ),
+ MTK_PIN(
+ 84, "GPIO84",
+ MTK_EINT_FUNCTION(0, 84),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO84"),
+ MTK_FUNCTION(1, "SCP_SPI0_MI"),
+ MTK_FUNCTION(2, "SPI6_B_MI"),
+ MTK_FUNCTION(4, "I2SOUT5_DATA1"),
+ MTK_FUNCTION(6, "TP_GPIO3_AO")
+ ),
+ MTK_PIN(
+ 85, "GPIO85",
+ MTK_EINT_FUNCTION(0, 85),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO85"),
+ MTK_FUNCTION(1, "SCP_SPI1_CK"),
+ MTK_FUNCTION(2, "SPI7_B_CLK"),
+ MTK_FUNCTION(4, "I2SIN5_DATA0"),
+ MTK_FUNCTION(5, "PWM_VLP"),
+ MTK_FUNCTION(6, "TP_GPIO4_AO")
+ ),
+ MTK_PIN(
+ 86, "GPIO86",
+ MTK_EINT_FUNCTION(0, 86),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO86"),
+ MTK_FUNCTION(1, "SCP_SPI1_CS"),
+ MTK_FUNCTION(2, "SPI7_B_CSB"),
+ MTK_FUNCTION(4, "I2SIN5_DATA1"),
+ MTK_FUNCTION(6, "TP_GPIO5_AO")
+ ),
+ MTK_PIN(
+ 87, "GPIO87",
+ MTK_EINT_FUNCTION(0, 87),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO87"),
+ MTK_FUNCTION(1, "SCP_SPI1_MO"),
+ MTK_FUNCTION(2, "SPI7_B_MO"),
+ MTK_FUNCTION(4, "I2SIN5_BCK"),
+ MTK_FUNCTION(6, "TP_GPIO6_AO")
+ ),
+ MTK_PIN(
+ 88, "GPIO88",
+ MTK_EINT_FUNCTION(0, 88),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO88"),
+ MTK_FUNCTION(1, "SCP_SPI1_MI"),
+ MTK_FUNCTION(2, "SPI7_B_MI"),
+ MTK_FUNCTION(4, "I2SIN5_LRCK"),
+ MTK_FUNCTION(6, "TP_GPIO7_AO")
+ ),
+ MTK_PIN(
+ 89, "GPIO89",
+ MTK_EINT_FUNCTION(0, 89),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO89"),
+ MTK_FUNCTION(1, "DSI_TE"),
+ MTK_FUNCTION(2, "DSI1_TE"),
+ MTK_FUNCTION(7, "DBG_MON_B30")
+ ),
+ MTK_PIN(
+ 90, "GPIO90",
+ MTK_EINT_FUNCTION(0, 90),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO90"),
+ MTK_FUNCTION(1, "LCM_RST"),
+ MTK_FUNCTION(2, "LCM1_RST"),
+ MTK_FUNCTION(7, "DBG_MON_B31")
+ ),
+ MTK_PIN(
+ 91, "GPIO91",
+ MTK_EINT_FUNCTION(0, 91),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO91"),
+ MTK_FUNCTION(1, "CMFLASH2"),
+ MTK_FUNCTION(2, "SF_D0"),
+ MTK_FUNCTION(3, "SRCLKENAI1"),
+ MTK_FUNCTION(5, "KPCOL2"),
+ MTK_FUNCTION(6, "TP_GPIO11_AO")
+ ),
+ MTK_PIN(
+ 92, "GPIO92",
+ MTK_EINT_FUNCTION(0, 92),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO92"),
+ MTK_FUNCTION(1, "CMFLASH3"),
+ MTK_FUNCTION(2, "SF_D1"),
+ MTK_FUNCTION(4, "DISP_PWM1"),
+ MTK_FUNCTION(6, "TP_GPIO12_AO")
+ ),
+ MTK_PIN(
+ 93, "GPIO93",
+ MTK_EINT_FUNCTION(0, 93),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO93"),
+ MTK_FUNCTION(1, "CMFLASH1"),
+ MTK_FUNCTION(2, "SF_D2"),
+ MTK_FUNCTION(3, "SRCLKENAI0"),
+ MTK_FUNCTION(5, "KPROW2"),
+ MTK_FUNCTION(6, "TP_GPIO13_AO")
+ ),
+ MTK_PIN(
+ 94, "GPIO94",
+ MTK_EINT_FUNCTION(0, 94),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO94"),
+ MTK_FUNCTION(1, "I2S_MCK1"),
+ MTK_FUNCTION(2, "SF_D3"),
+ MTK_FUNCTION(4, "MD32_0_GPIO0"),
+ MTK_FUNCTION(5, "CLKM0_A"),
+ MTK_FUNCTION(6, "TP_GPIO14_AO"),
+ MTK_FUNCTION(7, "DBG_MON_B18")
+ ),
+ MTK_PIN(
+ 95, "GPIO95",
+ MTK_EINT_FUNCTION(0, 95),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO95"),
+ MTK_FUNCTION(1, "I2SIN1_BCK"),
+ MTK_FUNCTION(2, "I2SIN4_BCK"),
+ MTK_FUNCTION(3, "SPI6_A_CLK"),
+ MTK_FUNCTION(4, "MD32_1_GPIO0"),
+ MTK_FUNCTION(5, "CLKM1_A"),
+ MTK_FUNCTION(6, "TP_GPIO15_AO"),
+ MTK_FUNCTION(7, "DBG_MON_B19")
+ ),
+ MTK_PIN(
+ 96, "GPIO96",
+ MTK_EINT_FUNCTION(0, 96),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO96"),
+ MTK_FUNCTION(1, "I2SIN1_LRCK"),
+ MTK_FUNCTION(2, "I2SIN4_LRCK"),
+ MTK_FUNCTION(3, "SPI6_A_CSB"),
+ MTK_FUNCTION(4, "MD32_2_GPIO0"),
+ MTK_FUNCTION(5, "CLKM2_A"),
+ MTK_FUNCTION(7, "DBG_MON_B20")
+ ),
+ MTK_PIN(
+ 97, "GPIO97",
+ MTK_EINT_FUNCTION(0, 97),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO97"),
+ MTK_FUNCTION(1, "I2SIN1_DI_A"),
+ MTK_FUNCTION(2, "I2SIN4_DATA0"),
+ MTK_FUNCTION(3, "SPI6_A_MO"),
+ MTK_FUNCTION(4, "MD32_3_GPIO0"),
+ MTK_FUNCTION(5, "CLKM3_A"),
+ MTK_FUNCTION(7, "DBG_MON_B21")
+ ),
+ MTK_PIN(
+ 98, "GPIO98",
+ MTK_EINT_FUNCTION(0, 98),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO98"),
+ MTK_FUNCTION(1, "I2SOUT1_DO"),
+ MTK_FUNCTION(2, "I2SOUT4_DATA0"),
+ MTK_FUNCTION(3, "SPI6_A_MI"),
+ MTK_FUNCTION(7, "DBG_MON_B22")
+ ),
+ MTK_PIN(
+ 99, "GPIO99",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO99"),
+ MTK_FUNCTION(1, "SCL0"),
+ MTK_FUNCTION(2, "LCM2_RST"),
+ MTK_FUNCTION(3, "AUD_DAC_26M_CLK"),
+ MTK_FUNCTION(4, "SPU0_SCL"),
+ MTK_FUNCTION(7, "DBG_MON_B24")
+ ),
+ MTK_PIN(
+ 100, "GPIO100",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO100"),
+ MTK_FUNCTION(1, "SDA0"),
+ MTK_FUNCTION(2, "DSI2_TE"),
+ MTK_FUNCTION(4, "SPU0_SDA"),
+ MTK_FUNCTION(7, "DBG_MON_B25")
+ ),
+ MTK_PIN(
+ 101, "GPIO101",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO101"),
+ MTK_FUNCTION(1, "SCL10"),
+ MTK_FUNCTION(2, "SF_CS"),
+ MTK_FUNCTION(3, "SCP_DMIC1_CLK"),
+ MTK_FUNCTION(4, "I2SIN5_DATA2"),
+ MTK_FUNCTION(5, "SCP_SCL_OIS"),
+ MTK_FUNCTION(6, "TP_GPIO10_AO"),
+ MTK_FUNCTION(7, "DBG_MON_B28")
+ ),
+ MTK_PIN(
+ 102, "GPIO102",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO102"),
+ MTK_FUNCTION(1, "SDA10"),
+ MTK_FUNCTION(2, "SF_CK"),
+ MTK_FUNCTION(3, "SCP_DMIC1_DAT"),
+ MTK_FUNCTION(4, "I2SIN5_DATA3"),
+ MTK_FUNCTION(5, "SCP_SDA_OIS"),
+ MTK_FUNCTION(6, "TP_GPIO11_AO"),
+ MTK_FUNCTION(7, "DBG_MON_B29")
+ ),
+ MTK_PIN(
+ 103, "GPIO103",
+ MTK_EINT_FUNCTION(0, 103),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO103"),
+ MTK_FUNCTION(1, "DISP_PWM"),
+ MTK_FUNCTION(2, "DSI1_TE"),
+ MTK_FUNCTION(5, "I2S_MCK0"),
+ MTK_FUNCTION(7, "DBG_MON_B23")
+ ),
+ MTK_PIN(
+ 104, "GPIO104",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO104"),
+ MTK_FUNCTION(1, "SCL6"),
+ MTK_FUNCTION(2, "SPU1_SCL"),
+ MTK_FUNCTION(3, "AUD_DAC_26M_CLK"),
+ MTK_FUNCTION(4, "USB_DRVVBUS_2P"),
+ MTK_FUNCTION(5, "I2S_MCK1"),
+ MTK_FUNCTION(6, "IDDIG_2P"),
+ MTK_FUNCTION(7, "DBG_MON_B26")
+ ),
+ MTK_PIN(
+ 105, "GPIO105",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO105"),
+ MTK_FUNCTION(1, "SDA6"),
+ MTK_FUNCTION(2, "SPU1_SDA"),
+ MTK_FUNCTION(3, "DISP_PWM2"),
+ MTK_FUNCTION(4, "VBUSVALID_2P"),
+ MTK_FUNCTION(5, "I2S_MCK2"),
+ MTK_FUNCTION(6, "VBUSVALID_3P"),
+ MTK_FUNCTION(7, "DBG_MON_B27")
+ ),
+ MTK_PIN(
+ 106, "GPIO106",
+ MTK_EINT_FUNCTION(0, 106),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO106"),
+ MTK_FUNCTION(1, "SCP_SPI3_CK"),
+ MTK_FUNCTION(2, "SPI3_B_CLK"),
+ MTK_FUNCTION(3, "MD_UTXD0"),
+ MTK_FUNCTION(4, "TP_UTXD1_VLP"),
+ MTK_FUNCTION(5, "CONN_BG_GPS_MCU_UART0_TXD"),
+ MTK_FUNCTION(6, "TP_GPIO6_AO"),
+ MTK_FUNCTION(7, "DBG_MON_B0")
+ ),
+ MTK_PIN(
+ 107, "GPIO107",
+ MTK_EINT_FUNCTION(0, 107),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO107"),
+ MTK_FUNCTION(1, "SCP_SPI3_CS"),
+ MTK_FUNCTION(2, "SPI3_B_CSB"),
+ MTK_FUNCTION(3, "MD_URXD0"),
+ MTK_FUNCTION(4, "TP_URXD1_VLP"),
+ MTK_FUNCTION(5, "CONN_BG_GPS_MCU_UART0_RXD"),
+ MTK_FUNCTION(6, "TP_GPIO7_AO"),
+ MTK_FUNCTION(7, "DBG_MON_B1")
+ ),
+ MTK_PIN(
+ 108, "GPIO108",
+ MTK_EINT_FUNCTION(0, 108),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO108"),
+ MTK_FUNCTION(1, "SCP_SPI3_MO"),
+ MTK_FUNCTION(2, "SPI3_B_MO"),
+ MTK_FUNCTION(3, "MD_UTXD1"),
+ MTK_FUNCTION(4, "MD32PCM_UTXD_AO_VLP"),
+ MTK_FUNCTION(5, "CONN_BG_GPS_MCU_UART1_TXD"),
+ MTK_FUNCTION(6, "TP_GPIO8_AO"),
+ MTK_FUNCTION(7, "DBG_MON_B2")
+ ),
+ MTK_PIN(
+ 109, "GPIO109",
+ MTK_EINT_FUNCTION(0, 109),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO109"),
+ MTK_FUNCTION(1, "SCP_SPI3_MI"),
+ MTK_FUNCTION(2, "SPI3_B_MI"),
+ MTK_FUNCTION(3, "MD_URXD1"),
+ MTK_FUNCTION(4, "MD32PCM_URXD_AO_VLP"),
+ MTK_FUNCTION(5, "CONN_BG_GPS_MCU_UART1_RXD"),
+ MTK_FUNCTION(6, "TP_GPIO9_AO"),
+ MTK_FUNCTION(7, "DBG_MON_B3")
+ ),
+ MTK_PIN(
+ 110, "GPIO110",
+ MTK_EINT_FUNCTION(0, 110),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO110"),
+ MTK_FUNCTION(1, "SPI1_CLK"),
+ MTK_FUNCTION(2, "PWM_0"),
+ MTK_FUNCTION(3, "MD_UCTS0"),
+ MTK_FUNCTION(4, "TP_UCTS1_VLP"),
+ MTK_FUNCTION(6, "SPU0_GPIO_O"),
+ MTK_FUNCTION(7, "DBG_MON_B4")
+ ),
+ MTK_PIN(
+ 111, "GPIO111",
+ MTK_EINT_FUNCTION(0, 111),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO111"),
+ MTK_FUNCTION(1, "SPI1_CSB"),
+ MTK_FUNCTION(2, "PWM_1"),
+ MTK_FUNCTION(3, "MD_URTS0"),
+ MTK_FUNCTION(4, "TP_URTS1_VLP"),
+ MTK_FUNCTION(6, "SPU0_GPIO_I"),
+ MTK_FUNCTION(7, "DBG_MON_B5")
+ ),
+ MTK_PIN(
+ 112, "GPIO112",
+ MTK_EINT_FUNCTION(0, 112),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO112"),
+ MTK_FUNCTION(1, "SPI1_MO"),
+ MTK_FUNCTION(2, "PWM_2"),
+ MTK_FUNCTION(3, "MD_UCTS1"),
+ MTK_FUNCTION(6, "SPU1_GPIO_O"),
+ MTK_FUNCTION(7, "DBG_MON_B6")
+ ),
+ MTK_PIN(
+ 113, "GPIO113",
+ MTK_EINT_FUNCTION(0, 113),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO113"),
+ MTK_FUNCTION(1, "SPI1_MI"),
+ MTK_FUNCTION(2, "PWM_3"),
+ MTK_FUNCTION(3, "MD_URTS1"),
+ MTK_FUNCTION(6, "SPU1_GPIO_I"),
+ MTK_FUNCTION(7, "DBG_MON_B7")
+ ),
+ MTK_PIN(
+ 114, "GPIO114",
+ MTK_EINT_FUNCTION(0, 114),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO114"),
+ MTK_FUNCTION(1, "SPI0_SPU_CLK"),
+ MTK_FUNCTION(2, "SPI4_A_CLK"),
+ MTK_FUNCTION(5, "CONN_BG_GPS_MCU_DBG_UART_TX"),
+ MTK_FUNCTION(7, "DBG_MON_B8")
+ ),
+ MTK_PIN(
+ 115, "GPIO115",
+ MTK_EINT_FUNCTION(0, 115),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO115"),
+ MTK_FUNCTION(1, "SPI0_SPU_CSB"),
+ MTK_FUNCTION(2, "SPI4_A_CSB"),
+ MTK_FUNCTION(7, "DBG_MON_B9")
+ ),
+ MTK_PIN(
+ 116, "GPIO116",
+ MTK_EINT_FUNCTION(0, 116),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO116"),
+ MTK_FUNCTION(1, "SPI0_SPU_MO"),
+ MTK_FUNCTION(2, "SPI4_A_MO"),
+ MTK_FUNCTION(3, "LCM1_RST"),
+ MTK_FUNCTION(7, "DBG_MON_B10")
+ ),
+ MTK_PIN(
+ 117, "GPIO117",
+ MTK_EINT_FUNCTION(0, 117),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO117"),
+ MTK_FUNCTION(1, "SPI0_SPU_MI"),
+ MTK_FUNCTION(2, "SPI4_A_MI"),
+ MTK_FUNCTION(3, "DSI1_TE"),
+ MTK_FUNCTION(7, "DBG_MON_B11")
+ ),
+ MTK_PIN(
+ 118, "GPIO118",
+ MTK_EINT_FUNCTION(0, 118),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO118"),
+ MTK_FUNCTION(1, "SPI5_CLK"),
+ MTK_FUNCTION(2, "USB_DRVVBUS"),
+ MTK_FUNCTION(3, "DP_TX_HPD"),
+ MTK_FUNCTION(4, "AD_ILDO_DTEST0")
+ ),
+ MTK_PIN(
+ 119, "GPIO119",
+ MTK_EINT_FUNCTION(0, 119),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO119"),
+ MTK_FUNCTION(1, "SPI5_CSB"),
+ MTK_FUNCTION(2, "VBUSVALID"),
+ MTK_FUNCTION(3, "DP_OC_EN"),
+ MTK_FUNCTION(4, "AD_ILDO_DTEST1")
+ ),
+ MTK_PIN(
+ 120, "GPIO120",
+ MTK_EINT_FUNCTION(0, 120),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO120"),
+ MTK_FUNCTION(1, "SPI5_MO"),
+ MTK_FUNCTION(2, "LCM2_RST"),
+ MTK_FUNCTION(3, "DP_RAUX_SBU1"),
+ MTK_FUNCTION(4, "AD_ILDO_DTEST2"),
+ MTK_FUNCTION(6, "IDDIG_3P")
+ ),
+ MTK_PIN(
+ 121, "GPIO121",
+ MTK_EINT_FUNCTION(0, 121),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO121"),
+ MTK_FUNCTION(1, "SPI5_MI"),
+ MTK_FUNCTION(2, "DSI2_TE"),
+ MTK_FUNCTION(3, "DP_RAUX_SBU2"),
+ MTK_FUNCTION(4, "AD_ILDO_DTEST3"),
+ MTK_FUNCTION(6, "USB_DRVVBUS_3P"),
+ MTK_FUNCTION(7, "DBG_MON_B17")
+ ),
+ MTK_PIN(
+ 122, "GPIO122",
+ MTK_EINT_FUNCTION(0, 122),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO122"),
+ MTK_FUNCTION(1, "AP_GOOD"),
+ MTK_FUNCTION(2, "CONN_TCXOENA_REQ")
+ ),
+ MTK_PIN(
+ 123, "GPIO123",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO123"),
+ MTK_FUNCTION(1, "SCL3"),
+ MTK_FUNCTION(5, "I2SIN2_LRCK"),
+ MTK_FUNCTION(6, "TP_UTXD_MD_VCORE")
+ ),
+ MTK_PIN(
+ 124, "GPIO124",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO124"),
+ MTK_FUNCTION(1, "SDA3"),
+ MTK_FUNCTION(6, "TP_URXD_MD_VCORE")
+ ),
+ MTK_PIN(
+ 125, "GPIO125",
+ MTK_EINT_FUNCTION(0, 125),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO125"),
+ MTK_FUNCTION(1, "MSDC1_CLK"),
+ MTK_FUNCTION(2, "MD1_SIM2_SCLK"),
+ MTK_FUNCTION(3, "HFRP_JTAG0_TCK"),
+ MTK_FUNCTION(4, "UDI_TCK"),
+ MTK_FUNCTION(5, "CONN_BGF_DSP_L1_JCK"),
+ MTK_FUNCTION(6, "SCP_JTAG_LITTLE_TCK_VLP"),
+ MTK_FUNCTION(7, "JTCK2_SEL1")
+ ),
+ MTK_PIN(
+ 126, "GPIO126",
+ MTK_EINT_FUNCTION(0, 126),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO126"),
+ MTK_FUNCTION(1, "MSDC1_CMD"),
+ MTK_FUNCTION(3, "HFRP_JTAG0_TMS"),
+ MTK_FUNCTION(4, "UDI_TMS"),
+ MTK_FUNCTION(5, "CONN_BGF_DSP_L1_JMS"),
+ MTK_FUNCTION(6, "SCP_JTAG_LITTLE_TMS_VLP"),
+ MTK_FUNCTION(7, "JTMS2_SEL1")
+ ),
+ MTK_PIN(
+ 127, "GPIO127",
+ MTK_EINT_FUNCTION(0, 127),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO127"),
+ MTK_FUNCTION(1, "MSDC1_DAT0"),
+ MTK_FUNCTION(2, "MD1_SIM2_SRST"),
+ MTK_FUNCTION(3, "HFRP_JTAG0_TDI"),
+ MTK_FUNCTION(4, "UDI_TDI_0"),
+ MTK_FUNCTION(5, "CONN_BGF_DSP_L1_JDI"),
+ MTK_FUNCTION(6, "SCP_JTAG_LITTLE_TDI_VLP"),
+ MTK_FUNCTION(7, "JTDI2_SEL1")
+ ),
+ MTK_PIN(
+ 128, "GPIO128",
+ MTK_EINT_FUNCTION(0, 128),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO128"),
+ MTK_FUNCTION(1, "MSDC1_DAT1"),
+ MTK_FUNCTION(2, "MD1_SIM2_SIO"),
+ MTK_FUNCTION(3, "HFRP_JTAG0_TDO"),
+ MTK_FUNCTION(4, "UDI_TDO_0"),
+ MTK_FUNCTION(5, "CONN_BGF_DSP_L1_JDO"),
+ MTK_FUNCTION(6, "SCP_JTAG_LITTLE_TDO_VLP"),
+ MTK_FUNCTION(7, "JTDO2_SEL1")
+ ),
+ MTK_PIN(
+ 129, "GPIO129",
+ MTK_EINT_FUNCTION(0, 129),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO129"),
+ MTK_FUNCTION(1, "MSDC1_DAT2"),
+ MTK_FUNCTION(2, "DSI2_HSYNC"),
+ MTK_FUNCTION(3, "HFRP_JTAG0_TRSTN"),
+ MTK_FUNCTION(4, "UDI_NTRST"),
+ MTK_FUNCTION(6, "SCP_JTAG_LITTLE_TRSTN_VLP"),
+ MTK_FUNCTION(7, "JTRSTN2_SEL1")
+ ),
+ MTK_PIN(
+ 130, "GPIO130",
+ MTK_EINT_FUNCTION(0, 130),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO130"),
+ MTK_FUNCTION(1, "MSDC1_DAT3"),
+ MTK_FUNCTION(2, "DSI3_HSYNC"),
+ MTK_FUNCTION(5, "CONN_BGF_DSP_L1_JINTP")
+ ),
+ MTK_PIN(
+ 131, "GPIO131",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO131"),
+ MTK_FUNCTION(1, "MD1_SIM2_SCLK"),
+ MTK_FUNCTION(2, "MD1_SIM1_SCLK"),
+ MTK_FUNCTION(3, "MCUPM_JTAG_TDI"),
+ MTK_FUNCTION(4, "CLKM0_A"),
+ MTK_FUNCTION(5, "CONN_BGF_DSP_L5_JDI"),
+ MTK_FUNCTION(6, "TSFDC_SCK"),
+ MTK_FUNCTION(7, "SCP_JTAG0_TDI_VCORE")
+ ),
+ MTK_PIN(
+ 132, "GPIO132",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO132"),
+ MTK_FUNCTION(1, "MD1_SIM2_SRST"),
+ MTK_FUNCTION(2, "MD1_SIM1_SRST"),
+ MTK_FUNCTION(3, "MCUPM_JTAG_TMS"),
+ MTK_FUNCTION(4, "CLKM1_B"),
+ MTK_FUNCTION(5, "CONN_BGF_DSP_L5_JMS"),
+ MTK_FUNCTION(6, "TSFDC_SDI"),
+ MTK_FUNCTION(7, "SCP_JTAG0_TMS_VCORE")
+ ),
+ MTK_PIN(
+ 133, "GPIO133",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO133"),
+ MTK_FUNCTION(1, "MD1_SIM2_SIO"),
+ MTK_FUNCTION(2, "MD1_SIM1_SIO"),
+ MTK_FUNCTION(3, "MCUPM_JTAG_TDO"),
+ MTK_FUNCTION(5, "CONN_BGF_DSP_L5_JDO"),
+ MTK_FUNCTION(6, "TSFDC_SCF"),
+ MTK_FUNCTION(7, "SCP_JTAG0_TDO_VCORE")
+ ),
+ MTK_PIN(
+ 134, "GPIO134",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO134"),
+ MTK_FUNCTION(1, "MD1_SIM1_SCLK"),
+ MTK_FUNCTION(2, "MD1_SIM2_SCLK"),
+ MTK_FUNCTION(6, "TSFDC_26M")
+ ),
+ MTK_PIN(
+ 135, "GPIO135",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO135"),
+ MTK_FUNCTION(1, "MD1_SIM1_SRST"),
+ MTK_FUNCTION(2, "MD1_SIM2_SRST"),
+ MTK_FUNCTION(3, "MCUPM_JTAG_TCK"),
+ MTK_FUNCTION(5, "CONN_BGF_DSP_L5_JCK"),
+ MTK_FUNCTION(6, "TSFDC_SDO"),
+ MTK_FUNCTION(7, "SCP_JTAG0_TCK_VCORE")
+ ),
+ MTK_PIN(
+ 136, "GPIO136",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO136"),
+ MTK_FUNCTION(1, "MD1_SIM1_SIO"),
+ MTK_FUNCTION(2, "MD1_SIM2_SIO"),
+ MTK_FUNCTION(3, "MCUPM_JTAG_TRSTN"),
+ MTK_FUNCTION(5, "CONN_BGF_DSP_L5_JINTP"),
+ MTK_FUNCTION(6, "TSFDC_FOUT"),
+ MTK_FUNCTION(7, "SCP_JTAG0_TRSTN_VCORE")
+ ),
+ MTK_PIN(
+ 137, "GPIO137",
+ MTK_EINT_FUNCTION(0, 137),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO137"),
+ MTK_FUNCTION(1, "MIPI0_D_SCLK"),
+ MTK_FUNCTION(2, "BPI_BUS16"),
+ MTK_FUNCTION(4, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(6, "SPM_JTAG_TRSTN_VCORE"),
+ MTK_FUNCTION(7, "DBG_MON_A0")
+ ),
+ MTK_PIN(
+ 138, "GPIO138",
+ MTK_EINT_FUNCTION(0, 138),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO138"),
+ MTK_FUNCTION(1, "MIPI0_D_SDATA"),
+ MTK_FUNCTION(2, "BPI_BUS17"),
+ MTK_FUNCTION(4, "PCM0_LRCK"),
+ MTK_FUNCTION(6, "SPM_JTAG_TCK_VCORE"),
+ MTK_FUNCTION(7, "DBG_MON_A1")
+ ),
+ MTK_PIN(
+ 139, "GPIO139",
+ MTK_EINT_FUNCTION(0, 139),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO139"),
+ MTK_FUNCTION(1, "MIPI1_D_SCLK"),
+ MTK_FUNCTION(2, "BPI_BUS18"),
+ MTK_FUNCTION(4, "MD_GPS_BLANK"),
+ MTK_FUNCTION(6, "SPM_JTAG_TMS_VCORE"),
+ MTK_FUNCTION(7, "DBG_MON_A2")
+ ),
+ MTK_PIN(
+ 140, "GPIO140",
+ MTK_EINT_FUNCTION(0, 140),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO140"),
+ MTK_FUNCTION(1, "MIPI1_D_SDATA"),
+ MTK_FUNCTION(2, "BPI_BUS19"),
+ MTK_FUNCTION(4, "MD_URXD1_CONN"),
+ MTK_FUNCTION(6, "SPM_JTAG_TDO_VCORE"),
+ MTK_FUNCTION(7, "DBG_MON_A3")
+ ),
+ MTK_PIN(
+ 141, "GPIO141",
+ MTK_EINT_FUNCTION(0, 141),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO141"),
+ MTK_FUNCTION(1, "MIPI2_D_SCLK"),
+ MTK_FUNCTION(2, "BPI_BUS20"),
+ MTK_FUNCTION(4, "MD_UTXD1_CONN"),
+ MTK_FUNCTION(6, "SPM_JTAG_TDI_VCORE"),
+ MTK_FUNCTION(7, "DBG_MON_A4")
+ ),
+ MTK_PIN(
+ 142, "GPIO142",
+ MTK_EINT_FUNCTION(0, 142),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO142"),
+ MTK_FUNCTION(1, "MIPI2_D_SDATA"),
+ MTK_FUNCTION(2, "BPI_BUS21"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TRSTN_VCORE"),
+ MTK_FUNCTION(7, "DBG_MON_A5")
+ ),
+ MTK_PIN(
+ 143, "GPIO143",
+ MTK_EINT_FUNCTION(0, 143),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO143"),
+ MTK_FUNCTION(1, "MIPI3_D_SCLK"),
+ MTK_FUNCTION(2, "BPI_BUS22"),
+ MTK_FUNCTION(4, "TP_UTXD_GNSS_VLP"),
+ MTK_FUNCTION(5, "MD_UTXD1_CONN"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TCK_VCORE")
+ ),
+ MTK_PIN(
+ 144, "GPIO144",
+ MTK_EINT_FUNCTION(0, 144),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO144"),
+ MTK_FUNCTION(1, "MIPI3_D_SDATA"),
+ MTK_FUNCTION(2, "BPI_BUS23"),
+ MTK_FUNCTION(4, "TP_URXD_GNSS_VLP"),
+ MTK_FUNCTION(5, "MD_URXD1_CONN"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TMS_VCORE")
+ ),
+ MTK_PIN(
+ 145, "GPIO145",
+ MTK_EINT_FUNCTION(0, 145),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO145"),
+ MTK_FUNCTION(1, "BPI_BUS0"),
+ MTK_FUNCTION(4, "PCIE_WAKEN_1P"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TDO_VCORE"),
+ MTK_FUNCTION(7, "DBG_MON_A10")
+ ),
+ MTK_PIN(
+ 146, "GPIO146",
+ MTK_EINT_FUNCTION(0, 146),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO146"),
+ MTK_FUNCTION(1, "BPI_BUS1"),
+ MTK_FUNCTION(4, "PCIE_PERSTN_1P"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TDI_VCORE"),
+ MTK_FUNCTION(7, "DBG_MON_A11")
+ ),
+ MTK_PIN(
+ 147, "GPIO147",
+ MTK_EINT_FUNCTION(0, 147),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO147"),
+ MTK_FUNCTION(1, "BPI_BUS2"),
+ MTK_FUNCTION(2, "AUD_DAC_26M_CLK"),
+ MTK_FUNCTION(4, "PCIE_CLKREQN_1P"),
+ MTK_FUNCTION(6, "SCP_JTAG_LITTLE_TRSTN_VCORE"),
+ MTK_FUNCTION(7, "DBG_MON_A12")
+ ),
+ MTK_PIN(
+ 148, "GPIO148",
+ MTK_EINT_FUNCTION(0, 148),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO148"),
+ MTK_FUNCTION(1, "BPI_BUS3"),
+ MTK_FUNCTION(2, "AUD_DAC_26M_CLK"),
+ MTK_FUNCTION(4, "TP_UTXD_MD_VLP"),
+ MTK_FUNCTION(5, "TP_GPIO0_AO"),
+ MTK_FUNCTION(6, "SCP_JTAG_LITTLE_TCK_VCORE"),
+ MTK_FUNCTION(7, "DBG_MON_A13")
+ ),
+ MTK_PIN(
+ 149, "GPIO149",
+ MTK_EINT_FUNCTION(0, 149),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO149"),
+ MTK_FUNCTION(1, "BPI_BUS4"),
+ MTK_FUNCTION(2, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(4, "TP_URXD_MD_VLP"),
+ MTK_FUNCTION(5, "TP_GPIO1_AO"),
+ MTK_FUNCTION(6, "SCP_JTAG_LITTLE_TMS_VCORE"),
+ MTK_FUNCTION(7, "DBG_MON_A14")
+ ),
+ MTK_PIN(
+ 150, "GPIO150",
+ MTK_EINT_FUNCTION(0, 150),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO150"),
+ MTK_FUNCTION(1, "BPI_BUS5"),
+ MTK_FUNCTION(2, "GPS_PPS0"),
+ MTK_FUNCTION(5, "TP_GPIO2_AO"),
+ MTK_FUNCTION(6, "SCP_JTAG_LITTLE_TDO_VCORE"),
+ MTK_FUNCTION(7, "DBG_MON_A15")
+ ),
+ MTK_PIN(
+ 151, "GPIO151",
+ MTK_EINT_FUNCTION(0, 151),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO151"),
+ MTK_FUNCTION(1, "BPI_BUS6"),
+ MTK_FUNCTION(2, "GPS_PPS1"),
+ MTK_FUNCTION(5, "TP_GPIO3_AO"),
+ MTK_FUNCTION(6, "SCP_JTAG_LITTLE_TDI_VCORE")
+ ),
+ MTK_PIN(
+ 152, "GPIO152",
+ MTK_EINT_FUNCTION(0, 152),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO152"),
+ MTK_FUNCTION(1, "BPI_BUS7"),
+ MTK_FUNCTION(2, "EDP_TX_HPD"),
+ MTK_FUNCTION(5, "AGPS_SYNC"),
+ MTK_FUNCTION(6, "SSPM_UTXD_AO_VCORE")
+ ),
+ MTK_PIN(
+ 153, "GPIO153",
+ MTK_EINT_FUNCTION(0, 153),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO153"),
+ MTK_FUNCTION(1, "MD_UCNT_A_TGL"),
+ MTK_FUNCTION(6, "TP_URTS1_VCORE"),
+ MTK_FUNCTION(7, "DBG_MON_A8")
+ ),
+ MTK_PIN(
+ 154, "GPIO154",
+ MTK_EINT_FUNCTION(0, 154),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO154"),
+ MTK_FUNCTION(1, "DIGRF_IRQ"),
+ MTK_FUNCTION(6, "TP_UCTS1_VCORE"),
+ MTK_FUNCTION(7, "DBG_MON_A9")
+ ),
+ MTK_PIN(
+ 155, "GPIO155",
+ MTK_EINT_FUNCTION(0, 155),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO155"),
+ MTK_FUNCTION(1, "MIPI_M_SCLK"),
+ MTK_FUNCTION(4, "UCTS2"),
+ MTK_FUNCTION(6, "TP_UTXD_CONSYS_VCORE"),
+ MTK_FUNCTION(7, "DBG_MON_A6")
+ ),
+ MTK_PIN(
+ 156, "GPIO156",
+ MTK_EINT_FUNCTION(0, 156),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO156"),
+ MTK_FUNCTION(1, "MIPI_M_SDATA"),
+ MTK_FUNCTION(4, "URTS2"),
+ MTK_FUNCTION(6, "TP_URXD_CONSYS_VCORE"),
+ MTK_FUNCTION(7, "DBG_MON_A7")
+ ),
+ MTK_PIN(
+ 157, "GPIO157",
+ MTK_EINT_FUNCTION(0, 157),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO157"),
+ MTK_FUNCTION(1, "BPI_BUS8"),
+ MTK_FUNCTION(4, "UTXD2"),
+ MTK_FUNCTION(5, "CLKM0_A"),
+ MTK_FUNCTION(6, "SSPM_URXD_AO_VCORE"),
+ MTK_FUNCTION(7, "DBG_MON_A16")
+ ),
+ MTK_PIN(
+ 158, "GPIO158",
+ MTK_EINT_FUNCTION(0, 158),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO158"),
+ MTK_FUNCTION(1, "BPI_BUS9"),
+ MTK_FUNCTION(4, "URXD2"),
+ MTK_FUNCTION(5, "CLKM1_A"),
+ MTK_FUNCTION(6, "TP_UTXD1_VCORE")
+ ),
+ MTK_PIN(
+ 159, "GPIO159",
+ MTK_EINT_FUNCTION(0, 159),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO159"),
+ MTK_FUNCTION(1, "BPI_BUS10"),
+ MTK_FUNCTION(2, "MD_INT0"),
+ MTK_FUNCTION(3, "SRCLKENAI1"),
+ MTK_FUNCTION(5, "CLKM2_A"),
+ MTK_FUNCTION(6, "TP_URXD1_VCORE")
+ ),
+ MTK_PIN(
+ 160, "GPIO160",
+ MTK_EINT_FUNCTION(0, 160),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO160"),
+ MTK_FUNCTION(1, "UTXD0"),
+ MTK_FUNCTION(2, "MD_UTXD1"),
+ MTK_FUNCTION(5, "MBISTREADEN_TRIGGER"),
+ MTK_FUNCTION(6, "CONN_BG_GPS_MCU_DBG_UART_TX")
+ ),
+ MTK_PIN(
+ 161, "GPIO161",
+ MTK_EINT_FUNCTION(0, 161),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO161"),
+ MTK_FUNCTION(1, "URXD0"),
+ MTK_FUNCTION(2, "MD_URXD1"),
+ MTK_FUNCTION(5, "MBISTWRITEEN_TRIGGER")
+ ),
+ MTK_PIN(
+ 162, "GPIO162",
+ MTK_EINT_FUNCTION(0, 162),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO162"),
+ MTK_FUNCTION(1, "UTXD1"),
+ MTK_FUNCTION(2, "MD_UTXD0"),
+ MTK_FUNCTION(3, "TP_UTXD1_VLP"),
+ MTK_FUNCTION(4, "ADSP_UTXD0"),
+ MTK_FUNCTION(5, "SSPM_UTXD_AO_VLP"),
+ MTK_FUNCTION(6, "HFRP_UTXD1")
+ ),
+ MTK_PIN(
+ 163, "GPIO163",
+ MTK_EINT_FUNCTION(0, 163),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO163"),
+ MTK_FUNCTION(1, "URXD1"),
+ MTK_FUNCTION(2, "MD_URXD0"),
+ MTK_FUNCTION(3, "TP_URXD1_VLP"),
+ MTK_FUNCTION(4, "ADSP_URXD0"),
+ MTK_FUNCTION(5, "SSPM_URXD_AO_VLP"),
+ MTK_FUNCTION(6, "HFRP_URXD1")
+ ),
+ MTK_PIN(
+ 164, "GPIO164",
+ MTK_EINT_FUNCTION(0, 164),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO164"),
+ MTK_FUNCTION(1, "SCP_SCL0"),
+ MTK_FUNCTION(6, "TP_GPIO0_AO"),
+ MTK_FUNCTION(7, "DBG_MON_A22")
+ ),
+ MTK_PIN(
+ 165, "GPIO165",
+ MTK_EINT_FUNCTION(0, 165),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO165"),
+ MTK_FUNCTION(1, "SCP_SDA0"),
+ MTK_FUNCTION(6, "TP_GPIO1_AO"),
+ MTK_FUNCTION(7, "DBG_MON_A23")
+ ),
+ MTK_PIN(
+ 166, "GPIO166",
+ MTK_EINT_FUNCTION(0, 166),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO166"),
+ MTK_FUNCTION(1, "SCP_SCL2"),
+ MTK_FUNCTION(6, "TP_GPIO2_AO"),
+ MTK_FUNCTION(7, "DBG_MON_A24")
+ ),
+ MTK_PIN(
+ 167, "GPIO167",
+ MTK_EINT_FUNCTION(0, 167),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO167"),
+ MTK_FUNCTION(1, "SCP_SDA2"),
+ MTK_FUNCTION(6, "TP_GPIO3_AO"),
+ MTK_FUNCTION(7, "DBG_MON_A25")
+ ),
+ MTK_PIN(
+ 168, "GPIO168",
+ MTK_EINT_FUNCTION(0, 168),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO168"),
+ MTK_FUNCTION(1, "SCP_SPI2_CK"),
+ MTK_FUNCTION(2, "SPI2_B_CLK"),
+ MTK_FUNCTION(3, "PWM_VLP"),
+ MTK_FUNCTION(4, "SCP_SCL2"),
+ MTK_FUNCTION(7, "DBG_MON_A26")
+ ),
+ MTK_PIN(
+ 169, "GPIO169",
+ MTK_EINT_FUNCTION(0, 169),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO169"),
+ MTK_FUNCTION(1, "SCP_SPI2_CS"),
+ MTK_FUNCTION(2, "SPI2_B_CSB"),
+ MTK_FUNCTION(7, "DBG_MON_A27")
+ ),
+ MTK_PIN(
+ 170, "GPIO170",
+ MTK_EINT_FUNCTION(0, 170),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO170"),
+ MTK_FUNCTION(1, "SCP_SPI2_MO"),
+ MTK_FUNCTION(2, "SPI2_B_MO"),
+ MTK_FUNCTION(4, "SCP_SDA2"),
+ MTK_FUNCTION(7, "DBG_MON_A28")
+ ),
+ MTK_PIN(
+ 171, "GPIO171",
+ MTK_EINT_FUNCTION(0, 171),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO171"),
+ MTK_FUNCTION(1, "SCP_SPI2_MI"),
+ MTK_FUNCTION(2, "SPI2_B_MI"),
+ MTK_FUNCTION(7, "DBG_MON_A29")
+ ),
+ MTK_PIN(
+ 172, "GPIO172",
+ MTK_EINT_FUNCTION(0, 172),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO172"),
+ MTK_FUNCTION(1, "CONN_TCXOENA_REQ")
+ ),
+ MTK_PIN(
+ 173, "GPIO173",
+ MTK_EINT_FUNCTION(0, 173),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO173"),
+ MTK_FUNCTION(1, "CMFLASH3"),
+ MTK_FUNCTION(2, "PWM_3"),
+ MTK_FUNCTION(3, "MD_GPS_L5_BLANK"),
+ MTK_FUNCTION(4, "CLKM1_A"),
+ MTK_FUNCTION(7, "DBG_MON_A31")
+ ),
+ MTK_PIN(
+ 174, "GPIO174",
+ MTK_EINT_FUNCTION(0, 174),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO174"),
+ MTK_FUNCTION(1, "CMFLASH0"),
+ MTK_FUNCTION(2, "PWM_0"),
+ MTK_FUNCTION(3, "VBUSVALID_1P"),
+ MTK_FUNCTION(4, "MD32_2_RXD"),
+ MTK_FUNCTION(5, "DISP_PWM3")
+ ),
+ MTK_PIN(
+ 175, "GPIO175",
+ MTK_EINT_FUNCTION(0, 175),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO175"),
+ MTK_FUNCTION(1, "CMFLASH1"),
+ MTK_FUNCTION(2, "PWM_1"),
+ MTK_FUNCTION(3, "EDP_TX_HPD"),
+ MTK_FUNCTION(4, "MD32_2_TXD"),
+ MTK_FUNCTION(5, "DISP_PWM4")
+ ),
+ MTK_PIN(
+ 176, "GPIO176",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO176"),
+ MTK_FUNCTION(1, "SCL5"),
+ MTK_FUNCTION(2, "LCM3_RST"),
+ MTK_FUNCTION(4, "MD_URXD1_CONN"),
+ MTK_FUNCTION(6, "TP_UTXD_GNSS_VCORE")
+ ),
+ MTK_PIN(
+ 177, "GPIO177",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO177"),
+ MTK_FUNCTION(1, "SDA5"),
+ MTK_FUNCTION(2, "DSI3_TE"),
+ MTK_FUNCTION(4, "MD_UTXD1_CONN"),
+ MTK_FUNCTION(6, "TP_URXD_GNSS_VCORE")
+ ),
+ MTK_PIN(
+ 178, "GPIO178",
+ MTK_EINT_FUNCTION(0, 178),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO178"),
+ MTK_FUNCTION(1, "DMIC_CLK"),
+ MTK_FUNCTION(2, "SCP_DMIC_CLK"),
+ MTK_FUNCTION(3, "SRCLKENAI0"),
+ MTK_FUNCTION(4, "CLKM2_B"),
+ MTK_FUNCTION(5, "TP_GPIO7_AO"),
+ MTK_FUNCTION(6, "SPU1_UTX"),
+ MTK_FUNCTION(7, "DAP_SONIC_SWCK")
+ ),
+ MTK_PIN(
+ 179, "GPIO179",
+ MTK_EINT_FUNCTION(0, 179),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO179"),
+ MTK_FUNCTION(1, "DMIC_DAT"),
+ MTK_FUNCTION(2, "SCP_DMIC_DAT"),
+ MTK_FUNCTION(3, "SRCLKENAI1"),
+ MTK_FUNCTION(4, "CLKM3_B"),
+ MTK_FUNCTION(5, "TP_GPIO8_AO"),
+ MTK_FUNCTION(6, "SPU1_URX"),
+ MTK_FUNCTION(7, "DAP_SONIC_SWD")
+ ),
+ MTK_PIN(
+ 180, "GPIO180",
+ MTK_EINT_FUNCTION(0, 180),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO180"),
+ MTK_FUNCTION(1, "IDDIG_1P"),
+ MTK_FUNCTION(2, "CMVREF0"),
+ MTK_FUNCTION(3, "GPS_PPS1"),
+ MTK_FUNCTION(4, "GPS_L5_ELNA_EN"),
+ MTK_FUNCTION(5, "DISP_PWM1")
+ ),
+ MTK_PIN(
+ 181, "GPIO181",
+ MTK_EINT_FUNCTION(0, 181),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO181"),
+ MTK_FUNCTION(1, "USB_DRVVBUS_1P"),
+ MTK_FUNCTION(2, "CMVREF1"),
+ MTK_FUNCTION(3, "MFG_EB_JTAG_TRSTN"),
+ MTK_FUNCTION(4, "ADSP_JTAG1_TRSTN"),
+ MTK_FUNCTION(5, "HFRP_JTAG1_TRSTN"),
+ MTK_FUNCTION(6, "SPU1_NTRST"),
+ MTK_FUNCTION(7, "CONN_BG_GPS_MCU_TRST_B")
+ ),
+ MTK_PIN(
+ 182, "GPIO182",
+ MTK_EINT_FUNCTION(0, 182),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO182"),
+ MTK_FUNCTION(1, "SCL11"),
+ MTK_FUNCTION(2, "CMVREF2"),
+ MTK_FUNCTION(3, "MFG_EB_JTAG_TCK"),
+ MTK_FUNCTION(4, "ADSP_JTAG1_TCK"),
+ MTK_FUNCTION(5, "HFRP_JTAG1_TCK"),
+ MTK_FUNCTION(6, "SPU1_TCK"),
+ MTK_FUNCTION(7, "CONN_BG_GPS_MCU_TCK")
+ ),
+ MTK_PIN(
+ 183, "GPIO183",
+ MTK_EINT_FUNCTION(0, 183),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO183"),
+ MTK_FUNCTION(1, "SDA11"),
+ MTK_FUNCTION(2, "CMVREF3"),
+ MTK_FUNCTION(3, "MFG_EB_JTAG_TMS"),
+ MTK_FUNCTION(4, "ADSP_JTAG1_TMS"),
+ MTK_FUNCTION(5, "HFRP_JTAG1_TMS"),
+ MTK_FUNCTION(6, "SPU1_TMS"),
+ MTK_FUNCTION(7, "CONN_BG_GPS_MCU_TMS")
+ ),
+ MTK_PIN(
+ 184, "GPIO184",
+ MTK_EINT_FUNCTION(0, 184),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO184"),
+ MTK_FUNCTION(1, "SCL12"),
+ MTK_FUNCTION(2, "CMVREF4"),
+ MTK_FUNCTION(3, "MFG_EB_JTAG_TDO"),
+ MTK_FUNCTION(4, "ADSP_JTAG1_TDO"),
+ MTK_FUNCTION(5, "HFRP_JTAG1_TDO"),
+ MTK_FUNCTION(6, "SPU1_TDO"),
+ MTK_FUNCTION(7, "CONN_BG_GPS_MCU_TDO")
+ ),
+ MTK_PIN(
+ 185, "GPIO185",
+ MTK_EINT_FUNCTION(0, 185),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO185"),
+ MTK_FUNCTION(1, "SDA12"),
+ MTK_FUNCTION(2, "CMVREF5"),
+ MTK_FUNCTION(3, "MFG_EB_JTAG_TDI"),
+ MTK_FUNCTION(4, "ADSP_JTAG1_TDI"),
+ MTK_FUNCTION(5, "HFRP_JTAG1_TDI"),
+ MTK_FUNCTION(6, "SPU1_TDI"),
+ MTK_FUNCTION(7, "CONN_BG_GPS_MCU_TDI")
+ ),
+ MTK_PIN(
+ 186, "GPIO186",
+ MTK_EINT_FUNCTION(0, 186),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO186"),
+ MTK_FUNCTION(1, "MD_GPS_L1_BLANK"),
+ MTK_FUNCTION(2, "PMSR_SMAP"),
+ MTK_FUNCTION(3, "TP_GPIO2_AO")
+ ),
+ MTK_PIN(
+ 187, "GPIO187",
+ MTK_EINT_FUNCTION(0, 187),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO187"),
+ MTK_FUNCTION(1, "MD_GPS_L5_BLANK"),
+ MTK_FUNCTION(3, "TP_GPIO4_AO")
+ ),
+ MTK_PIN(
+ 188, "GPIO188",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO188"),
+ MTK_FUNCTION(1, "SCL2"),
+ MTK_FUNCTION(2, "SCP_SCL8")
+ ),
+ MTK_PIN(
+ 189, "GPIO189",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO189"),
+ MTK_FUNCTION(1, "SDA2"),
+ MTK_FUNCTION(2, "SCP_SDA8")
+ ),
+ MTK_PIN(
+ 190, "GPIO190",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO190"),
+ MTK_FUNCTION(1, "SCL4"),
+ MTK_FUNCTION(2, "SCP_SCL9"),
+ MTK_FUNCTION(6, "UDI_TDI_6")
+ ),
+ MTK_PIN(
+ 191, "GPIO191",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO191"),
+ MTK_FUNCTION(1, "SDA4"),
+ MTK_FUNCTION(2, "SCP_SDA9"),
+ MTK_FUNCTION(6, "UDI_TDI_7")
+ ),
+ MTK_PIN(
+ 192, "GPIO192",
+ MTK_EINT_FUNCTION(0, 192),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO192"),
+ MTK_FUNCTION(1, "CMMCLK2"),
+ MTK_FUNCTION(4, "MD32_3_RXD")
+ ),
+ MTK_PIN(
+ 193, "GPIO193",
+ MTK_EINT_FUNCTION(0, 193),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO193"),
+ MTK_FUNCTION(3, "CLKM0_B"),
+ MTK_FUNCTION(4, "MD32_3_TXD"),
+ MTK_FUNCTION(6, "UDI_TDO_7")
+ ),
+ MTK_PIN(
+ 194, "GPIO194",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO194"),
+ MTK_FUNCTION(1, "SCL7"),
+ MTK_FUNCTION(2, "MD32_3_GPIO0"),
+ MTK_FUNCTION(3, "CLKM2_B"),
+ MTK_FUNCTION(6, "UDI_TDI_2")
+ ),
+ MTK_PIN(
+ 195, "GPIO195",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO195"),
+ MTK_FUNCTION(1, "SDA7"),
+ MTK_FUNCTION(3, "CLKM3_B"),
+ MTK_FUNCTION(6, "UDI_TDI_3")
+ ),
+ MTK_PIN(
+ 196, "GPIO196",
+ MTK_EINT_FUNCTION(0, 196),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO196"),
+ MTK_FUNCTION(1, "CMMCLK3")
+ ),
+ MTK_PIN(
+ 197, "GPIO197",
+ MTK_EINT_FUNCTION(0, 197),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO197"),
+ MTK_FUNCTION(3, "CLKM1_B"),
+ MTK_FUNCTION(6, "UDI_TDI_1")
+ ),
+ MTK_PIN(
+ 198, "GPIO198",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO198"),
+ MTK_FUNCTION(1, "SCL8"),
+ MTK_FUNCTION(6, "UDI_TDI_4")
+ ),
+ MTK_PIN(
+ 199, "GPIO199",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO199"),
+ MTK_FUNCTION(1, "SDA8"),
+ MTK_FUNCTION(6, "UDI_TDI_5")
+ ),
+ MTK_PIN(
+ 200, "GPIO200",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO200"),
+ MTK_FUNCTION(1, "SCL1")
+ ),
+ MTK_PIN(
+ 201, "GPIO201",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO201"),
+ MTK_FUNCTION(1, "SDA1"),
+ MTK_FUNCTION(7, "TSFDC_BG_COMP")
+ ),
+ MTK_PIN(
+ 202, "GPIO202",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO202"),
+ MTK_FUNCTION(1, "SCL9"),
+ MTK_FUNCTION(2, "SCP_SCL7"),
+ MTK_FUNCTION(6, "TP_GPIO15_AO")
+ ),
+ MTK_PIN(
+ 203, "GPIO203",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO203"),
+ MTK_FUNCTION(1, "SDA9"),
+ MTK_FUNCTION(2, "SCP_SDA7"),
+ MTK_FUNCTION(6, "TP_GPIO9_AO")
+ ),
+ MTK_PIN(
+ 204, "GPIO204",
+ MTK_EINT_FUNCTION(0, 204),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO204"),
+ MTK_FUNCTION(1, "SCL13"),
+ MTK_FUNCTION(2, "CMVREF6"),
+ MTK_FUNCTION(3, "GPS_L1_ELNA_EN"),
+ MTK_FUNCTION(5, "CLKM2_B"),
+ MTK_FUNCTION(6, "TP_GPIO12_AO")
+ ),
+ MTK_PIN(
+ 205, "GPIO205",
+ MTK_EINT_FUNCTION(0, 205),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO205"),
+ MTK_FUNCTION(1, "SDA13"),
+ MTK_FUNCTION(2, "CMVREF7"),
+ MTK_FUNCTION(3, "GPS_L5_ELNA_EN"),
+ MTK_FUNCTION(5, "CLKM3_B"),
+ MTK_FUNCTION(6, "TP_GPIO13_AO")
+ ),
+ MTK_PIN(
+ 206, "GPIO206",
+ MTK_EINT_FUNCTION(0, 206),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO206"),
+ MTK_FUNCTION(2, "MD32_2_GPIO0"),
+ MTK_FUNCTION(5, "VBUSVALID"),
+ MTK_FUNCTION(6, "UDI_TDO_3")
+ ),
+ MTK_PIN(
+ 207, "GPIO207",
+ MTK_EINT_FUNCTION(0, 207),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO207"),
+ MTK_FUNCTION(1, "PCIE_WAKEN_2P"),
+ MTK_FUNCTION(2, "PMSR_SMAP_MAX"),
+ MTK_FUNCTION(4, "FMI2S_A_BCK"),
+ MTK_FUNCTION(6, "UDI_TDO_4")
+ ),
+ MTK_PIN(
+ 208, "GPIO208",
+ MTK_EINT_FUNCTION(0, 208),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO208"),
+ MTK_FUNCTION(1, "PCIE_CLKREQN_2P"),
+ MTK_FUNCTION(2, "PMSR_SMAP_MAX_W"),
+ MTK_FUNCTION(4, "FMI2S_A_LRCK"),
+ MTK_FUNCTION(5, "CLKM0_B"),
+ MTK_FUNCTION(6, "UDI_TDO_5")
+ ),
+ MTK_PIN(
+ 209, "GPIO209",
+ MTK_EINT_FUNCTION(0, 209),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO209"),
+ MTK_FUNCTION(1, "PCIE_PERSTN_2P"),
+ MTK_FUNCTION(2, "PMSR_SMAP"),
+ MTK_FUNCTION(4, "FMI2S_A_DI"),
+ MTK_FUNCTION(5, "CLKM1_B"),
+ MTK_FUNCTION(6, "UDI_TDO_6")
+ ),
+ MTK_PIN(
+ 210, "GPIO210",
+ MTK_EINT_FUNCTION(0, 210),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO210"),
+ MTK_FUNCTION(1, "CMMCLK4")
+ ),
+ MTK_PIN(
+ 211, "GPIO211",
+ MTK_EINT_FUNCTION(0, 211),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO211"),
+ MTK_FUNCTION(1, "CMMCLK5"),
+ MTK_FUNCTION(2, "CONN_TCXOENA_REQ")
+ ),
+ MTK_PIN(
+ 212, "GPIO212",
+ MTK_EINT_FUNCTION(0, 212),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO212"),
+ MTK_FUNCTION(1, "CMMCLK6"),
+ MTK_FUNCTION(2, "TP_GPIO10_AO"),
+ MTK_FUNCTION(5, "IDDIG"),
+ MTK_FUNCTION(6, "UDI_TDO_1")
+ ),
+ MTK_PIN(
+ 213, "GPIO213",
+ MTK_EINT_FUNCTION(0, 213),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO213"),
+ MTK_FUNCTION(1, "CMMCLK7"),
+ MTK_FUNCTION(2, "TP_GPIO11_AO"),
+ MTK_FUNCTION(5, "USB_DRVVBUS"),
+ MTK_FUNCTION(6, "UDI_TDO_2")
+ ),
+ MTK_PIN(
+ 214, "GPIO214",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO214"),
+ MTK_FUNCTION(1, "SCP_SCL3"),
+ MTK_FUNCTION(2, "SDA14_E1_SCL14_E2"),
+ MTK_FUNCTION(6, "GBE1_MDC"),
+ MTK_FUNCTION(7, "GBE0_MDC")
+ ),
+ MTK_PIN(
+ 215, "GPIO215",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO215"),
+ MTK_FUNCTION(1, "SCP_SDA3"),
+ MTK_FUNCTION(2, "SCL14_E1_SDA14_E2"),
+ MTK_FUNCTION(6, "GBE1_MDIO"),
+ MTK_FUNCTION(7, "GBE0_MDIO")
+ ),
+ MTK_PIN(
+ 216, "GPIO216",
+ MTK_EINT_FUNCTION(0, 216),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO216"),
+ MTK_FUNCTION(1, "GPS_PPS0")
+ ),
+ MTK_PIN(
+ 217, "GPIO217",
+ MTK_EINT_FUNCTION(0, 217),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO217"),
+ MTK_FUNCTION(1, "KPROW0"),
+ MTK_FUNCTION(6, "TP_GPIO12_AO")
+ ),
+ MTK_PIN(
+ 218, "GPIO218",
+ MTK_EINT_FUNCTION(0, 218),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO218"),
+ MTK_FUNCTION(1, "KPROW1"),
+ MTK_FUNCTION(2, "SPI0_WP"),
+ MTK_FUNCTION(3, "MBISTREADEN_TRIGGER"),
+ MTK_FUNCTION(5, "GPS_L5_ELNA_EN"),
+ MTK_FUNCTION(6, "TP_GPIO14_AO")
+ ),
+ MTK_PIN(
+ 219, "GPIO219",
+ MTK_EINT_FUNCTION(0, 219),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO219"),
+ MTK_FUNCTION(1, "KPCOL1"),
+ MTK_FUNCTION(2, "SPI0_HOLD"),
+ MTK_FUNCTION(3, "MBISTWRITEEN_TRIGGER"),
+ MTK_FUNCTION(4, "SPMI_M_TRIG_FLAG"),
+ MTK_FUNCTION(5, "GPS_L1_ELNA_EN"),
+ MTK_FUNCTION(6, "SPM_JTAG_TRSTN_VLP"),
+ MTK_FUNCTION(7, "JTRSTN_SEL1")
+ ),
+ MTK_PIN(
+ 220, "GPIO220",
+ MTK_EINT_FUNCTION(0, 220),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO220"),
+ MTK_FUNCTION(1, "SPI0_CLK"),
+ MTK_FUNCTION(6, "SPM_JTAG_TCK_VLP"),
+ MTK_FUNCTION(7, "JTCK_SEL1")
+ ),
+ MTK_PIN(
+ 221, "GPIO221",
+ MTK_EINT_FUNCTION(0, 221),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO221"),
+ MTK_FUNCTION(1, "SPI0_CSB"),
+ MTK_FUNCTION(6, "SPM_JTAG_TMS_VLP"),
+ MTK_FUNCTION(7, "JTMS_SEL1")
+ ),
+ MTK_PIN(
+ 222, "GPIO222",
+ MTK_EINT_FUNCTION(0, 222),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO222"),
+ MTK_FUNCTION(1, "SPI0_MO"),
+ MTK_FUNCTION(2, "SCP_SCL7"),
+ MTK_FUNCTION(6, "SPM_JTAG_TDO_VLP"),
+ MTK_FUNCTION(7, "JTDO_SEL1")
+ ),
+ MTK_PIN(
+ 223, "GPIO223",
+ MTK_EINT_FUNCTION(0, 223),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO223"),
+ MTK_FUNCTION(1, "SPI0_MI"),
+ MTK_FUNCTION(2, "SCP_SDA7"),
+ MTK_FUNCTION(6, "SPM_JTAG_TDI_VLP"),
+ MTK_FUNCTION(7, "JTDI_SEL1")
+ ),
+ MTK_PIN(
+ 224, "GPIO224",
+ MTK_EINT_FUNCTION(0, 224),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO224"),
+ MTK_FUNCTION(1, "MSDC2_CLK"),
+ MTK_FUNCTION(2, "DMIC2_CLK"),
+ MTK_FUNCTION(3, "GBE0_AUX_PPS0"),
+ MTK_FUNCTION(4, "GBE0_TXER"),
+ MTK_FUNCTION(5, "GBE1_TXER"),
+ MTK_FUNCTION(6, "GBE1_AUX_PPS0"),
+ MTK_FUNCTION(7, "MD32_1_TXD")
+ ),
+ MTK_PIN(
+ 225, "GPIO225",
+ MTK_EINT_FUNCTION(0, 225),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO225"),
+ MTK_FUNCTION(1, "MSDC2_CMD"),
+ MTK_FUNCTION(2, "DMIC2_DAT"),
+ MTK_FUNCTION(3, "GBE0_AUX_PPS1"),
+ MTK_FUNCTION(4, "GBE0_RXER"),
+ MTK_FUNCTION(5, "GBE1_RXER"),
+ MTK_FUNCTION(6, "GBE1_AUX_PPS1"),
+ MTK_FUNCTION(7, "MD32_1_RXD")
+ ),
+ MTK_PIN(
+ 226, "GPIO226",
+ MTK_EINT_FUNCTION(0, 226),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO226"),
+ MTK_FUNCTION(1, "MSDC2_DAT0"),
+ MTK_FUNCTION(2, "I2SIN3_BCK"),
+ MTK_FUNCTION(3, "GBE0_AUX_PPS2"),
+ MTK_FUNCTION(4, "GBE0_COL"),
+ MTK_FUNCTION(5, "GBE1_COL"),
+ MTK_FUNCTION(6, "GBE1_AUX_PPS2"),
+ MTK_FUNCTION(7, "GBE1_MDC")
+ ),
+ MTK_PIN(
+ 227, "GPIO227",
+ MTK_EINT_FUNCTION(0, 227),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO227"),
+ MTK_FUNCTION(1, "MSDC2_DAT1"),
+ MTK_FUNCTION(2, "I2SIN3_LRCK"),
+ MTK_FUNCTION(3, "GBE0_AUX_PPS3"),
+ MTK_FUNCTION(4, "GBE0_INTR"),
+ MTK_FUNCTION(5, "GBE1_INTR"),
+ MTK_FUNCTION(6, "GBE1_AUX_PPS3"),
+ MTK_FUNCTION(7, "GBE1_MDIO")
+ ),
+ MTK_PIN(
+ 228, "GPIO228",
+ MTK_EINT_FUNCTION(0, 228),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO228"),
+ MTK_FUNCTION(1, "MSDC2_DAT2"),
+ MTK_FUNCTION(2, "I2SIN3_DI"),
+ MTK_FUNCTION(3, "GBE0_MDC"),
+ MTK_FUNCTION(4, "GBE1_MDC"),
+ MTK_FUNCTION(5, "CONN_BG_GPS_MCU_AICE_TCKC")
+ ),
+ MTK_PIN(
+ 229, "GPIO229",
+ MTK_EINT_FUNCTION(0, 229),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO229"),
+ MTK_FUNCTION(1, "MSDC2_DAT3"),
+ MTK_FUNCTION(2, "I2SOUT3_DO"),
+ MTK_FUNCTION(3, "GBE0_MDIO"),
+ MTK_FUNCTION(4, "GBE1_MDIO"),
+ MTK_FUNCTION(5, "CONN_BG_GPS_MCU_AICE_TMSC"),
+ MTK_FUNCTION(7, "AVB_CLK2")
+ ),
+ MTK_PIN(
+ 230, "GPIO230",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO230"),
+ MTK_FUNCTION(1, "CONN_TOP_CLK")
+ ),
+ MTK_PIN(
+ 231, "GPIO231",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO231"),
+ MTK_FUNCTION(1, "CONN_TOP_DATA")
+ ),
+ MTK_PIN(
+ 232, "GPIO232",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO232"),
+ MTK_FUNCTION(1, "CONN_HRST_B")
+ ),
+ MTK_PIN(
+ 233, "GPIO233",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO233"),
+ MTK_FUNCTION(1, "I2SIN0_BCK")
+ ),
+ MTK_PIN(
+ 234, "GPIO234",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO234"),
+ MTK_FUNCTION(1, "I2SIN0_LRCK")
+ ),
+ MTK_PIN(
+ 235, "GPIO235",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO235"),
+ MTK_FUNCTION(1, "I2SIN0_DI")
+ ),
+ MTK_PIN(
+ 236, "GPIO236",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO236"),
+ MTK_FUNCTION(1, "I2SOUT0_DO")
+ ),
+ MTK_PIN(
+ 237, "GPIO237",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO237"),
+ MTK_FUNCTION(1, "CONN_UARTHUB_UART_TX"),
+ MTK_FUNCTION(3, "UTXD3")
+ ),
+ MTK_PIN(
+ 238, "GPIO238",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO238"),
+ MTK_FUNCTION(1, "CONN_UARTHUB_UART_RX"),
+ MTK_FUNCTION(3, "URXD3")
+ ),
+ MTK_PIN(
+ 239, "GPIO239",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO239"),
+ MTK_FUNCTION(1, "TP_UTXD_CONSYS_VLP"),
+ MTK_FUNCTION(2, "TP_URXD_CONSYS_VLP")
+ ),
+ MTK_PIN(
+ 240, "GPIO240",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO240"),
+ MTK_FUNCTION(1, "TP_URXD_CONSYS_VLP"),
+ MTK_FUNCTION(2, "TP_UTXD_CONSYS_VLP")
+ ),
+ MTK_PIN(
+ 241, "GPIO241",
+ MTK_EINT_FUNCTION(0, 241),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO241"),
+ MTK_FUNCTION(1, "PCIE_PERSTN")
+ ),
+ MTK_PIN(
+ 242, "GPIO242",
+ MTK_EINT_FUNCTION(0, 242),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO242"),
+ MTK_FUNCTION(1, "PCIE_WAKEN")
+ ),
+ MTK_PIN(
+ 243, "GPIO243",
+ MTK_EINT_FUNCTION(0, 243),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO243"),
+ MTK_FUNCTION(1, "PCIE_CLKREQN")
+ ),
+ MTK_PIN(
+ 244, "GPIO244",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO244"),
+ MTK_FUNCTION(1, "CONN_RST")
+ ),
+ MTK_PIN(
+ 245, "GPIO245",
+ MTK_EINT_FUNCTION(0, 245),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO245")
+ ),
+ MTK_PIN(
+ 246, "GPIO246",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO246"),
+ MTK_FUNCTION(1, "CONN_PTA_TXD0")
+ ),
+ MTK_PIN(
+ 247, "GPIO247",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO247"),
+ MTK_FUNCTION(1, "CONN_PTA_RXD0")
+ ),
+ MTK_PIN(
+ 248, "GPIO248",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO248"),
+ MTK_FUNCTION(3, "UCTS3")
+ ),
+ MTK_PIN(
+ 249, "GPIO249",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO249"),
+ MTK_FUNCTION(3, "URTS3")
+ ),
+ MTK_PIN(
+ 250, "GPIO250",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO250")
+ ),
+ MTK_PIN(
+ 251, "GPIO251",
+ MTK_EINT_FUNCTION(0, 251),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO251"),
+ MTK_FUNCTION(1, "IDDIG_1P")
+ ),
+ MTK_PIN(
+ 252, "GPIO252",
+ MTK_EINT_FUNCTION(0, 252),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO252"),
+ MTK_FUNCTION(1, "USB_DRVVBUS_1P")
+ ),
+ MTK_PIN(
+ 253, "GPIO253",
+ MTK_EINT_FUNCTION(0, 253),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO253"),
+ MTK_FUNCTION(1, "VBUSVALID_1P")
+ ),
+ MTK_PIN(
+ 254, "GPIO254",
+ MTK_EINT_FUNCTION(0, 254),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO254"),
+ MTK_FUNCTION(1, "IDDIG_2P")
+ ),
+ MTK_PIN(
+ 255, "GPIO255",
+ MTK_EINT_FUNCTION(0, 255),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO255"),
+ MTK_FUNCTION(1, "USB_DRVVBUS_2P")
+ ),
+ MTK_PIN(
+ 256, "GPIO256",
+ MTK_EINT_FUNCTION(0, 256),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO256"),
+ MTK_FUNCTION(1, "VBUSVALID_2P")
+ ),
+ MTK_PIN(
+ 257, "GPIO257",
+ MTK_EINT_FUNCTION(0, 257),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO257"),
+ MTK_FUNCTION(1, "VBUSVALID_3P")
+ ),
+ MTK_PIN(
+ 258, "GPIO258",
+ MTK_EINT_FUNCTION(0, 258),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO258"),
+ MTK_FUNCTION(7, "AVB_CLK1")
+ ),
+ MTK_PIN(
+ 259, "GPIO259",
+ MTK_EINT_FUNCTION(0, 259),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO259"),
+ MTK_FUNCTION(1, "GBE0_TXD0"),
+ MTK_FUNCTION(2, "GBE1_TXD0")
+ ),
+ MTK_PIN(
+ 260, "GPIO260",
+ MTK_EINT_FUNCTION(0, 260),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO260"),
+ MTK_FUNCTION(1, "GBE0_TXD1"),
+ MTK_FUNCTION(2, "GBE1_TXD1")
+ ),
+ MTK_PIN(
+ 261, "GPIO261",
+ MTK_EINT_FUNCTION(0, 261),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO261"),
+ MTK_FUNCTION(1, "GBE0_TXC"),
+ MTK_FUNCTION(2, "GBE1_TXC")
+ ),
+ MTK_PIN(
+ 262, "GPIO262",
+ MTK_EINT_FUNCTION(0, 262),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO262"),
+ MTK_FUNCTION(1, "GBE0_TXEN"),
+ MTK_FUNCTION(2, "GBE1_TXEN")
+ ),
+ MTK_PIN(
+ 263, "GPIO263",
+ MTK_EINT_FUNCTION(0, 263),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO263"),
+ MTK_FUNCTION(1, "GBE0_RXD0"),
+ MTK_FUNCTION(2, "GBE1_RXD0"),
+ MTK_FUNCTION(3, "GBE0_AUX_PPS0")
+ ),
+ MTK_PIN(
+ 264, "GPIO264",
+ MTK_EINT_FUNCTION(0, 264),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO264"),
+ MTK_FUNCTION(1, "GBE0_RXD1"),
+ MTK_FUNCTION(2, "GBE1_RXD1"),
+ MTK_FUNCTION(3, "GBE0_AUX_PPS1")
+ ),
+ MTK_PIN(
+ 265, "GPIO265",
+ MTK_EINT_FUNCTION(0, 265),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO265"),
+ MTK_FUNCTION(1, "GBE0_RXC"),
+ MTK_FUNCTION(2, "GBE1_RXC"),
+ MTK_FUNCTION(3, "GBE0_AUX_PPS2")
+ ),
+ MTK_PIN(
+ 266, "GPIO266",
+ MTK_EINT_FUNCTION(0, 266),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO266"),
+ MTK_FUNCTION(1, "GBE0_RXDV"),
+ MTK_FUNCTION(2, "GBE1_RXDV"),
+ MTK_FUNCTION(3, "GBE0_AUX_PPS3")
+ ),
+ MTK_PIN(
+ 267, "GPIO267",
+ MTK_EINT_FUNCTION(0, 267),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO267"),
+ MTK_FUNCTION(1, "GBE0_TXD2"),
+ MTK_FUNCTION(2, "GBE1_TXD2"),
+ MTK_FUNCTION(3, "GBE0_RXER"),
+ MTK_FUNCTION(4, "GBE1_RXER")
+ ),
+ MTK_PIN(
+ 268, "GPIO268",
+ MTK_EINT_FUNCTION(0, 268),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO268"),
+ MTK_FUNCTION(1, "GBE0_TXD3"),
+ MTK_FUNCTION(2, "GBE1_TXD3")
+ ),
+ MTK_PIN(
+ 269, "GPIO269",
+ MTK_EINT_FUNCTION(0, 269),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO269"),
+ MTK_FUNCTION(1, "GBE0_RXD2"),
+ MTK_FUNCTION(2, "GBE1_RXD2"),
+ MTK_FUNCTION(3, "GBE0_MDC")
+ ),
+ MTK_PIN(
+ 270, "GPIO270",
+ MTK_EINT_FUNCTION(0, 270),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO270"),
+ MTK_FUNCTION(1, "GBE0_RXD3"),
+ MTK_FUNCTION(2, "GBE1_RXD3"),
+ MTK_FUNCTION(3, "GBE0_MDIO")
+ ),
+ MTK_PIN(
+ 271, "veint271",
+ MTK_EINT_FUNCTION(0, 271),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 272, "veint272",
+ MTK_EINT_FUNCTION(0, 272),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 273, "veint273",
+ MTK_EINT_FUNCTION(0, 273),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 274, "veint274",
+ MTK_EINT_FUNCTION(0, 274),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 275, "veint275",
+ MTK_EINT_FUNCTION(0, 275),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 276, "veint276",
+ MTK_EINT_FUNCTION(0, 276),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 277, "veint277",
+ MTK_EINT_FUNCTION(0, 277),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 278, "veint278",
+ MTK_EINT_FUNCTION(0, 278),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 279, "veint279",
+ MTK_EINT_FUNCTION(0, 279),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 280, "veint280",
+ MTK_EINT_FUNCTION(0, 280),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 281, "veint281",
+ MTK_EINT_FUNCTION(0, 281),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 282, "veint282",
+ MTK_EINT_FUNCTION(0, 282),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 283, "veint283",
+ MTK_EINT_FUNCTION(0, 283),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 284, "veint284",
+ MTK_EINT_FUNCTION(0, 284),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 285, "veint285",
+ MTK_EINT_FUNCTION(0, 285),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 286, "veint286",
+ MTK_EINT_FUNCTION(0, 286),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 287, "veint287",
+ MTK_EINT_FUNCTION(0, 287),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 288, "veint288",
+ MTK_EINT_FUNCTION(0, 288),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 289, "veint289",
+ MTK_EINT_FUNCTION(0, 289),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 290, "veint290",
+ MTK_EINT_FUNCTION(0, 290),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 291, "veint291",
+ MTK_EINT_FUNCTION(0, 291),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 292, "veint292",
+ MTK_EINT_FUNCTION(0, 292),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ )
+};
+
+static struct mtk_eint_pin eint_pins_mt8196[] = {
+ MTK_EINT_PIN(0, 2, 0, 1),
+ MTK_EINT_PIN(1, 2, 1, 1),
+ MTK_EINT_PIN(2, 2, 16, 0),
+ MTK_EINT_PIN(3, 2, 17, 0),
+ MTK_EINT_PIN(4, 2, 2, 1),
+ MTK_EINT_PIN(5, 2, 3, 1),
+ MTK_EINT_PIN(6, 2, 4, 1),
+ MTK_EINT_PIN(7, 2, 5, 1),
+ MTK_EINT_PIN(8, 2, 6, 1),
+ MTK_EINT_PIN(9, 2, 18, 0),
+ MTK_EINT_PIN(10, 2, 7, 1),
+ MTK_EINT_PIN(11, 2, 8, 1),
+ MTK_EINT_PIN(12, 2, 9, 1),
+ MTK_EINT_PIN(13, 1, 4, 0),
+ MTK_EINT_PIN(14, 0, 0, 1),
+ MTK_EINT_PIN(15, 1, 5, 0),
+ MTK_EINT_PIN(16, 1, 6, 0),
+ MTK_EINT_PIN(17, 1, 7, 0),
+ MTK_EINT_PIN(18, 1, 8, 0),
+ MTK_EINT_PIN(19, 1, 9, 0),
+ MTK_EINT_PIN(20, 0, 1, 1),
+ MTK_EINT_PIN(21, 0, 10, 0),
+ MTK_EINT_PIN(22, 0, 11, 0),
+ MTK_EINT_PIN(23, 0, 12, 0),
+ MTK_EINT_PIN(24, 0, 13, 0),
+ MTK_EINT_PIN(25, 0, 14, 0),
+ MTK_EINT_PIN(26, 0, 15, 0),
+ MTK_EINT_PIN(27, 0, 2, 1),
+ MTK_EINT_PIN(28, 0, 16, 0),
+ MTK_EINT_PIN(29, 0, 17, 0),
+ MTK_EINT_PIN(30, 0, 18, 0),
+ MTK_EINT_PIN(31, 0, 3, 1),
+ MTK_EINT_PIN(32, 0, 19, 0),
+ MTK_EINT_PIN(33, 0, 20, 0),
+ MTK_EINT_PIN(34, 0, 21, 0),
+ MTK_EINT_PIN(35, 0, 22, 0),
+ MTK_EINT_PIN(36, 0, 23, 0),
+ MTK_EINT_PIN(37, 0, 24, 0),
+ MTK_EINT_PIN(38, 0, 25, 0),
+ MTK_EINT_PIN(39, 2, 10, 1),
+ MTK_EINT_PIN(40, 2, 11, 1),
+ MTK_EINT_PIN(41, 2, 12, 1),
+ MTK_EINT_PIN(42, 2, 13, 1),
+ MTK_EINT_PIN(43, 2, 14, 1),
+ MTK_EINT_PIN(44, 2, 19, 0),
+ MTK_EINT_PIN(45, 2, 20, 0),
+ MTK_EINT_PIN(46, 2, 21, 0),
+ MTK_EINT_PIN(47, 2, 22, 0),
+ MTK_EINT_PIN(48, 2, 23, 0),
+ MTK_EINT_PIN(49, 2, 24, 0),
+ MTK_EINT_PIN(50, 2, 25, 0),
+ MTK_EINT_PIN(51, 2, 26, 0),
+ MTK_EINT_PIN(52, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(53, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(54, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(55, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(56, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(57, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(58, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(59, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(60, 2, 27, 0),
+ MTK_EINT_PIN(61, 2, 28, 0),
+ MTK_EINT_PIN(62, 2, 29, 0),
+ MTK_EINT_PIN(63, 2, 30, 0),
+ MTK_EINT_PIN(64, 2, 31, 0),
+ MTK_EINT_PIN(65, 2, 32, 0),
+ MTK_EINT_PIN(66, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(67, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(68, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(69, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(70, 2, 33, 0),
+ MTK_EINT_PIN(71, 2, 34, 0),
+ MTK_EINT_PIN(72, 2, 35, 0),
+ MTK_EINT_PIN(73, 2, 36, 0),
+ MTK_EINT_PIN(74, 2, 37, 0),
+ MTK_EINT_PIN(75, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(76, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(77, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(78, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(79, 2, 38, 0),
+ MTK_EINT_PIN(80, 2, 39, 0),
+ MTK_EINT_PIN(81, 2, 40, 0),
+ MTK_EINT_PIN(82, 2, 41, 0),
+ MTK_EINT_PIN(83, 2, 42, 0),
+ MTK_EINT_PIN(84, 2, 43, 0),
+ MTK_EINT_PIN(85, 2, 44, 0),
+ MTK_EINT_PIN(86, 2, 45, 0),
+ MTK_EINT_PIN(87, 2, 46, 0),
+ MTK_EINT_PIN(88, 2, 47, 0),
+ MTK_EINT_PIN(89, 2, 48, 0),
+ MTK_EINT_PIN(90, 2, 49, 0),
+ MTK_EINT_PIN(91, 2, 50, 0),
+ MTK_EINT_PIN(92, 2, 15, 1),
+ MTK_EINT_PIN(93, 2, 51, 0),
+ MTK_EINT_PIN(94, 2, 52, 0),
+ MTK_EINT_PIN(95, 2, 53, 0),
+ MTK_EINT_PIN(96, 2, 54, 0),
+ MTK_EINT_PIN(97, 2, 55, 0),
+ MTK_EINT_PIN(98, 2, 56, 0),
+ MTK_EINT_PIN(99, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(100, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(101, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(102, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(103, 2, 57, 0),
+ MTK_EINT_PIN(104, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(105, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(106, 1, 10, 0),
+ MTK_EINT_PIN(107, 1, 11, 0),
+ MTK_EINT_PIN(108, 1, 12, 0),
+ MTK_EINT_PIN(109, 1, 13, 0),
+ MTK_EINT_PIN(110, 1, 0, 1),
+ MTK_EINT_PIN(111, 1, 1, 1),
+ MTK_EINT_PIN(112, 1, 2, 1),
+ MTK_EINT_PIN(113, 1, 3, 1),
+ MTK_EINT_PIN(114, 1, 14, 0),
+ MTK_EINT_PIN(115, 1, 15, 0),
+ MTK_EINT_PIN(116, 1, 16, 0),
+ MTK_EINT_PIN(117, 1, 17, 0),
+ MTK_EINT_PIN(118, 1, 18, 0),
+ MTK_EINT_PIN(119, 1, 19, 0),
+ MTK_EINT_PIN(120, 1, 20, 0),
+ MTK_EINT_PIN(121, 1, 21, 0),
+ MTK_EINT_PIN(122, 1, 22, 0),
+ MTK_EINT_PIN(123, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(124, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(125, 1, 23, 0),
+ MTK_EINT_PIN(126, 1, 24, 0),
+ MTK_EINT_PIN(127, 1, 25, 0),
+ MTK_EINT_PIN(128, 1, 26, 0),
+ MTK_EINT_PIN(129, 1, 27, 0),
+ MTK_EINT_PIN(130, 1, 28, 0),
+ MTK_EINT_PIN(131, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(132, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(133, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(134, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(135, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(136, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(137, 0, 26, 0),
+ MTK_EINT_PIN(138, 0, 27, 0),
+ MTK_EINT_PIN(139, 0, 28, 0),
+ MTK_EINT_PIN(140, 0, 29, 0),
+ MTK_EINT_PIN(141, 0, 30, 0),
+ MTK_EINT_PIN(142, 0, 31, 0),
+ MTK_EINT_PIN(143, 0, 32, 0),
+ MTK_EINT_PIN(144, 0, 33, 0),
+ MTK_EINT_PIN(145, 0, 34, 0),
+ MTK_EINT_PIN(146, 0, 35, 0),
+ MTK_EINT_PIN(147, 0, 36, 0),
+ MTK_EINT_PIN(148, 0, 4, 1),
+ MTK_EINT_PIN(149, 0, 37, 0),
+ MTK_EINT_PIN(150, 0, 5, 1),
+ MTK_EINT_PIN(151, 0, 38, 0),
+ MTK_EINT_PIN(152, 0, 39, 0),
+ MTK_EINT_PIN(153, 0, 40, 0),
+ MTK_EINT_PIN(154, 0, 41, 0),
+ MTK_EINT_PIN(155, 0, 42, 0),
+ MTK_EINT_PIN(156, 0, 43, 0),
+ MTK_EINT_PIN(157, 0, 44, 0),
+ MTK_EINT_PIN(158, 0, 45, 0),
+ MTK_EINT_PIN(159, 0, 46, 0),
+ MTK_EINT_PIN(160, 0, 47, 0),
+ MTK_EINT_PIN(161, 0, 48, 0),
+ MTK_EINT_PIN(162, 0, 49, 0),
+ MTK_EINT_PIN(163, 0, 50, 0),
+ MTK_EINT_PIN(164, 0, 51, 0),
+ MTK_EINT_PIN(165, 0, 52, 0),
+ MTK_EINT_PIN(166, 0, 53, 0),
+ MTK_EINT_PIN(167, 0, 54, 0),
+ MTK_EINT_PIN(168, 0, 55, 0),
+ MTK_EINT_PIN(169, 0, 56, 0),
+ MTK_EINT_PIN(170, 0, 57, 0),
+ MTK_EINT_PIN(171, 0, 58, 0),
+ MTK_EINT_PIN(172, 0, 6, 1),
+ MTK_EINT_PIN(173, 0, 7, 1),
+ MTK_EINT_PIN(174, 0, 8, 1),
+ MTK_EINT_PIN(175, 0, 9, 1),
+ MTK_EINT_PIN(176, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(177, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(178, 0, 59, 0),
+ MTK_EINT_PIN(179, 0, 60, 0),
+ MTK_EINT_PIN(180, 0, 61, 0),
+ MTK_EINT_PIN(181, 0, 62, 0),
+ MTK_EINT_PIN(182, 0, 63, 0),
+ MTK_EINT_PIN(183, 0, 64, 0),
+ MTK_EINT_PIN(184, 0, 65, 0),
+ MTK_EINT_PIN(185, 0, 66, 0),
+ MTK_EINT_PIN(186, 3, 6, 0),
+ MTK_EINT_PIN(187, 3, 7, 0),
+ MTK_EINT_PIN(188, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(189, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(190, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(191, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(192, 3, 8, 0),
+ MTK_EINT_PIN(193, 3, 9, 0),
+ MTK_EINT_PIN(194, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(195, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(196, 3, 10, 0),
+ MTK_EINT_PIN(197, 3, 11, 0),
+ MTK_EINT_PIN(198, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(199, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(200, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(201, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(202, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(203, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(204, 3, 12, 0),
+ MTK_EINT_PIN(205, 3, 13, 0),
+ MTK_EINT_PIN(206, 3, 14, 0),
+ MTK_EINT_PIN(207, 3, 0, 1),
+ MTK_EINT_PIN(208, 3, 1, 1),
+ MTK_EINT_PIN(209, 3, 2, 1),
+ MTK_EINT_PIN(210, 3, 15, 0),
+ MTK_EINT_PIN(211, 3, 3, 1),
+ MTK_EINT_PIN(212, 3, 4, 1),
+ MTK_EINT_PIN(213, 3, 5, 1),
+ MTK_EINT_PIN(214, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(215, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(216, 3, 16, 0),
+ MTK_EINT_PIN(217, 3, 17, 0),
+ MTK_EINT_PIN(218, 3, 18, 0),
+ MTK_EINT_PIN(219, 3, 19, 0),
+ MTK_EINT_PIN(220, 3, 20, 0),
+ MTK_EINT_PIN(221, 3, 21, 0),
+ MTK_EINT_PIN(222, 3, 22, 0),
+ MTK_EINT_PIN(223, 3, 23, 0),
+ MTK_EINT_PIN(224, 3, 24, 0),
+ MTK_EINT_PIN(225, 3, 25, 0),
+ MTK_EINT_PIN(226, 3, 26, 0),
+ MTK_EINT_PIN(227, 3, 27, 0),
+ MTK_EINT_PIN(228, 3, 28, 0),
+ MTK_EINT_PIN(229, 3, 29, 0),
+ MTK_EINT_PIN(230, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(231, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(232, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(233, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(234, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(235, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(236, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(237, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(238, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(239, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(240, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(241, 3, 30, 0),
+ MTK_EINT_PIN(242, 3, 31, 0),
+ MTK_EINT_PIN(243, 3, 32, 0),
+ MTK_EINT_PIN(244, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(245, 3, 45, 0),
+ MTK_EINT_PIN(246, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(247, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(248, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(249, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(250, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(251, 0, 67, 0),
+ MTK_EINT_PIN(252, 0, 68, 0),
+ MTK_EINT_PIN(253, 0, 69, 0),
+ MTK_EINT_PIN(254, 0, 70, 0),
+ MTK_EINT_PIN(255, 0, 71, 0),
+ MTK_EINT_PIN(256, 0, 72, 0),
+ MTK_EINT_PIN(257, 0, 73, 0),
+ MTK_EINT_PIN(258, 0, 74, 0),
+ MTK_EINT_PIN(259, 3, 33, 0),
+ MTK_EINT_PIN(260, 3, 34, 0),
+ MTK_EINT_PIN(261, 3, 35, 0),
+ MTK_EINT_PIN(262, 3, 36, 0),
+ MTK_EINT_PIN(263, 3, 37, 0),
+ MTK_EINT_PIN(264, 3, 38, 0),
+ MTK_EINT_PIN(265, 3, 39, 0),
+ MTK_EINT_PIN(266, 3, 40, 0),
+ MTK_EINT_PIN(267, 3, 41, 0),
+ MTK_EINT_PIN(268, 3, 42, 0),
+ MTK_EINT_PIN(269, 3, 43, 0),
+ MTK_EINT_PIN(270, 3, 44, 0),
+ MTK_EINT_PIN(271, 4, 0, 0),
+ MTK_EINT_PIN(272, 4, 1, 0),
+ MTK_EINT_PIN(273, 4, 2, 0),
+ MTK_EINT_PIN(274, 4, 3, 0),
+ MTK_EINT_PIN(275, 4, 4, 0),
+ MTK_EINT_PIN(276, 4, 5, 0),
+ MTK_EINT_PIN(277, 4, 6, 0),
+ MTK_EINT_PIN(278, 4, 7, 0),
+ MTK_EINT_PIN(279, 4, 8, 0),
+ MTK_EINT_PIN(280, 4, 9, 0),
+ MTK_EINT_PIN(281, 4, 10, 0),
+ MTK_EINT_PIN(282, 4, 11, 0),
+ MTK_EINT_PIN(283, 4, 12, 0),
+ MTK_EINT_PIN(284, 4, 13, 0),
+ MTK_EINT_PIN(285, 4, 14, 0),
+ MTK_EINT_PIN(286, 4, 15, 0),
+ MTK_EINT_PIN(287, 4, 16, 0),
+ MTK_EINT_PIN(288, 4, 17, 0),
+ MTK_EINT_PIN(289, 4, 18, 0),
+ MTK_EINT_PIN(290, 4, 19, 0),
+ MTK_EINT_PIN(291, 4, 20, 0),
+ MTK_EINT_PIN(292, 4, 21, 0),
+};
+#endif /* __PINCTRL_MTK_MT8196_H */
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
index 87e958d827bf..89ef4e530fcc 100644
--- a/drivers/pinctrl/mediatek/pinctrl-paris.c
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
@@ -840,9 +840,6 @@ static int mtk_gpio_get_direction(struct gpio_chip *chip, unsigned int gpio)
const struct mtk_pin_desc *desc;
int value, err;
- if (gpio >= hw->soc->npins)
- return -EINVAL;
-
/*
* "Virtual" GPIOs are always and only used for interrupts
* Since they are only used for interrupts, they are always inputs
@@ -868,9 +865,6 @@ static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio)
const struct mtk_pin_desc *desc;
int value, err;
- if (gpio >= hw->soc->npins)
- return -EINVAL;
-
desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio];
err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DI, &value);
@@ -880,38 +874,29 @@ static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio)
return !!value;
}
-static void mtk_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
+static int mtk_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
{
struct mtk_pinctrl *hw = gpiochip_get_data(chip);
const struct mtk_pin_desc *desc;
- if (gpio >= hw->soc->npins)
- return;
-
desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio];
- mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DO, !!value);
+ return mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DO, !!value);
}
static int mtk_gpio_direction_input(struct gpio_chip *chip, unsigned int gpio)
{
- struct mtk_pinctrl *hw = gpiochip_get_data(chip);
-
- if (gpio >= hw->soc->npins)
- return -EINVAL;
-
return pinctrl_gpio_direction_input(chip, gpio);
}
static int mtk_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio,
int value)
{
- struct mtk_pinctrl *hw = gpiochip_get_data(chip);
-
- if (gpio >= hw->soc->npins)
- return -EINVAL;
+ int ret;
- mtk_gpio_set(chip, gpio, value);
+ ret = mtk_gpio_set(chip, gpio, value);
+ if (ret)
+ return ret;
return pinctrl_gpio_direction_output(chip, gpio);
}
@@ -964,7 +949,7 @@ static int mtk_build_gpiochip(struct mtk_pinctrl *hw)
chip->direction_input = mtk_gpio_direction_input;
chip->direction_output = mtk_gpio_direction_output;
chip->get = mtk_gpio_get;
- chip->set = mtk_gpio_set;
+ chip->set_rv = mtk_gpio_set;
chip->to_irq = mtk_gpio_to_irq;
chip->set_config = mtk_gpio_set_config;
chip->base = -1;
diff --git a/drivers/pinctrl/meson/Kconfig b/drivers/pinctrl/meson/Kconfig
index 90639bc171f6..0315e224bce6 100644
--- a/drivers/pinctrl/meson/Kconfig
+++ b/drivers/pinctrl/meson/Kconfig
@@ -3,7 +3,7 @@ menuconfig PINCTRL_MESON
tristate "Amlogic SoC pinctrl drivers"
depends on ARCH_MESON || COMPILE_TEST
depends on OF
- default y
+ default ARCH_MESON
select PINMUX
select PINCONF
select GENERIC_PINCONF
@@ -17,25 +17,25 @@ config PINCTRL_MESON8
bool "Meson 8 SoC pinctrl driver"
depends on ARM
select PINCTRL_MESON8_PMX
- default y
+ default ARCH_MESON
config PINCTRL_MESON8B
bool "Meson 8b SoC pinctrl driver"
depends on ARM
select PINCTRL_MESON8_PMX
- default y
+ default ARCH_MESON
config PINCTRL_MESON_GXBB
tristate "Meson gxbb SoC pinctrl driver"
depends on ARM64
select PINCTRL_MESON8_PMX
- default y
+ default ARCH_MESON
config PINCTRL_MESON_GXL
tristate "Meson gxl SoC pinctrl driver"
depends on ARM64
select PINCTRL_MESON8_PMX
- default y
+ default ARCH_MESON
config PINCTRL_MESON8_PMX
tristate
@@ -44,7 +44,7 @@ config PINCTRL_MESON_AXG
tristate "Meson axg Soc pinctrl driver"
depends on ARM64
select PINCTRL_MESON_AXG_PMX
- default y
+ default ARCH_MESON
config PINCTRL_MESON_AXG_PMX
tristate
@@ -53,24 +53,24 @@ config PINCTRL_MESON_G12A
tristate "Meson g12a Soc pinctrl driver"
depends on ARM64
select PINCTRL_MESON_AXG_PMX
- default y
+ default ARCH_MESON
config PINCTRL_MESON_A1
tristate "Meson a1 Soc pinctrl driver"
depends on ARM64
select PINCTRL_MESON_AXG_PMX
- default y
+ default ARCH_MESON
config PINCTRL_MESON_S4
tristate "Meson s4 Soc pinctrl driver"
depends on ARM64
select PINCTRL_MESON_AXG_PMX
- default y
+ default ARCH_MESON
config PINCTRL_AMLOGIC_A4
bool "AMLOGIC pincontrol"
depends on ARM64
- default y
+ default ARCH_MESON
help
This is the driver for the pin controller found on Amlogic SoCs.
@@ -82,12 +82,12 @@ config PINCTRL_AMLOGIC_C3
tristate "Amlogic C3 SoC pinctrl driver"
depends on ARM64
select PINCTRL_MESON_AXG_PMX
- default y
+ default ARCH_MESON
config PINCTRL_AMLOGIC_T7
tristate "Amlogic T7 SoC pinctrl driver"
depends on ARM64
select PINCTRL_MESON_AXG_PMX
- default y
+ default ARCH_MESON
endif
diff --git a/drivers/pinctrl/meson/pinctrl-amlogic-a4.c b/drivers/pinctrl/meson/pinctrl-amlogic-a4.c
index ee7bbc72f9b3..385cc619df13 100644
--- a/drivers/pinctrl/meson/pinctrl-amlogic-a4.c
+++ b/drivers/pinctrl/meson/pinctrl-amlogic-a4.c
@@ -596,20 +596,6 @@ static int aml_get_group_pins(struct pinctrl_dev *pctldev,
return 0;
}
-static inline const struct aml_pctl_group *
- aml_pctl_find_group_by_name(const struct aml_pinctrl *info,
- const char *name)
-{
- int i;
-
- for (i = 0; i < info->ngroups; i++) {
- if (!strcmp(info->groups[i].name, name))
- return &info->groups[i];
- }
-
- return NULL;
-}
-
static void aml_pin_dbg_show(struct pinctrl_dev *pcdev, struct seq_file *s,
unsigned int offset)
{
@@ -806,15 +792,15 @@ static int aml_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio,
value ? BIT(bit) : 0);
}
-static void aml_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
+static int aml_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
{
struct aml_gpio_bank *bank = gpiochip_get_data(chip);
unsigned int bit, reg;
aml_gpio_calc_reg_and_bit(bank, AML_REG_OUT, gpio, &reg, &bit);
- regmap_update_bits(bank->reg_gpio, reg, BIT(bit),
- value ? BIT(bit) : 0);
+ return regmap_update_bits(bank->reg_gpio, reg, BIT(bit),
+ value ? BIT(bit) : 0);
}
static int aml_gpio_get(struct gpio_chip *chip, unsigned int gpio)
@@ -832,7 +818,7 @@ static const struct gpio_chip aml_gpio_template = {
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
.set_config = gpiochip_generic_config,
- .set = aml_gpio_set,
+ .set_rv = aml_gpio_set,
.get = aml_gpio_get,
.direction_input = aml_gpio_direction_input,
.direction_output = aml_gpio_direction_output,
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index e5a32a0532ee..f5be61f2ede4 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -580,9 +580,9 @@ static int meson_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
gpio, value);
}
-static void meson_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
+static int meson_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
{
- meson_pinconf_set_drive(gpiochip_get_data(chip), gpio, value);
+ return meson_pinconf_set_drive(gpiochip_get_data(chip), gpio, value);
}
static int meson_gpio_get(struct gpio_chip *chip, unsigned gpio)
@@ -616,7 +616,7 @@ static int meson_gpiolib_register(struct meson_pinctrl *pc)
pc->chip.direction_input = meson_gpio_direction_input;
pc->chip.direction_output = meson_gpio_direction_output;
pc->chip.get = meson_gpio_get;
- pc->chip.set = meson_gpio_set;
+ pc->chip.set_rv = meson_gpio_set;
pc->chip.base = -1;
pc->chip.ngpio = pc->data->num_pins;
pc->chip.can_sleep = false;
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index 335744ac8310..a6b106984e12 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -358,9 +358,7 @@ static int armada_37xx_pmx_set_by_name(struct pinctrl_dev *pctldev,
val = grp->val[func];
- regmap_update_bits(info->regmap, reg, mask, val);
-
- return 0;
+ return regmap_update_bits(info->regmap, reg, mask, val);
}
static int armada_37xx_pmx_set(struct pinctrl_dev *pctldev,
@@ -402,10 +400,13 @@ static int armada_37xx_gpio_get_direction(struct gpio_chip *chip,
struct armada_37xx_pinctrl *info = gpiochip_get_data(chip);
unsigned int reg = OUTPUT_EN;
unsigned int val, mask;
+ int ret;
armada_37xx_update_reg(&reg, &offset);
mask = BIT(offset);
- regmap_read(info->regmap, reg, &val);
+ ret = regmap_read(info->regmap, reg, &val);
+ if (ret)
+ return ret;
if (val & mask)
return GPIO_LINE_DIRECTION_OUT;
@@ -417,22 +418,22 @@ static int armada_37xx_gpio_direction_output(struct gpio_chip *chip,
unsigned int offset, int value)
{
struct armada_37xx_pinctrl *info = gpiochip_get_data(chip);
- unsigned int reg = OUTPUT_EN;
+ unsigned int en_offset = offset;
+ unsigned int reg = OUTPUT_VAL;
unsigned int mask, val, ret;
armada_37xx_update_reg(&reg, &offset);
mask = BIT(offset);
+ val = value ? mask : 0;
- ret = regmap_update_bits(info->regmap, reg, mask, mask);
-
+ ret = regmap_update_bits(info->regmap, reg, mask, val);
if (ret)
return ret;
- reg = OUTPUT_VAL;
- val = value ? mask : 0;
- regmap_update_bits(info->regmap, reg, mask, val);
+ reg = OUTPUT_EN;
+ armada_37xx_update_reg(&reg, &en_offset);
- return 0;
+ return regmap_update_bits(info->regmap, reg, mask, mask);
}
static int armada_37xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
@@ -440,17 +441,20 @@ static int armada_37xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
struct armada_37xx_pinctrl *info = gpiochip_get_data(chip);
unsigned int reg = INPUT_VAL;
unsigned int val, mask;
+ int ret;
armada_37xx_update_reg(&reg, &offset);
mask = BIT(offset);
- regmap_read(info->regmap, reg, &val);
+ ret = regmap_read(info->regmap, reg, &val);
+ if (ret)
+ return ret;
return (val & mask) != 0;
}
-static void armada_37xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int armada_37xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct armada_37xx_pinctrl *info = gpiochip_get_data(chip);
unsigned int reg = OUTPUT_VAL;
@@ -460,7 +464,7 @@ static void armada_37xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
mask = BIT(offset);
val = value ? mask : 0;
- regmap_update_bits(info->regmap, reg, mask, val);
+ return regmap_update_bits(info->regmap, reg, mask, val);
}
static int armada_37xx_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
@@ -469,16 +473,17 @@ static int armada_37xx_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
{
struct armada_37xx_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
struct gpio_chip *chip = range->gc;
+ int ret;
dev_dbg(info->dev, "gpio_direction for pin %u as %s-%d to %s\n",
offset, range->name, offset, input ? "input" : "output");
if (input)
- armada_37xx_gpio_direction_input(chip, offset);
+ ret = armada_37xx_gpio_direction_input(chip, offset);
else
- armada_37xx_gpio_direction_output(chip, offset, 0);
+ ret = armada_37xx_gpio_direction_output(chip, offset, 0);
- return 0;
+ return ret;
}
static int armada_37xx_gpio_request_enable(struct pinctrl_dev *pctldev,
@@ -513,7 +518,7 @@ static const struct pinmux_ops armada_37xx_pmx_ops = {
static const struct gpio_chip armada_37xx_gpiolib_chip = {
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
- .set = armada_37xx_gpio_set,
+ .set_rv = armada_37xx_gpio_set,
.get = armada_37xx_gpio_get,
.get_direction = armada_37xx_gpio_get_direction,
.direction_input = armada_37xx_gpio_direction_input,
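Note: besides the .set_rv conversion, the armada-37xx hunks reorder direction_output: the requested level is latched in OUTPUT_VAL before the pad is enabled via OUTPUT_EN, and the enable write re-runs armada_37xx_update_reg() on a saved copy of the offset because the first call already translated it; regmap errors are now propagated throughout. A generic sketch of the same ordering for a hypothetical regmap-backed controller (bar_* names and register offsets are illustrative, and the per-bank register translation is omitted):

	#include <linux/bits.h>
	#include <linux/gpio/driver.h>
	#include <linux/regmap.h>

	#define BAR_OUTPUT_VAL	0x18	/* hypothetical level register */
	#define BAR_OUTPUT_EN	0x1c	/* hypothetical direction register */

	struct bar_gpio {
		struct regmap *map;
	};

	static int bar_gpio_direction_output(struct gpio_chip *chip,
					     unsigned int offset, int value)
	{
		struct bar_gpio *priv = gpiochip_get_data(chip);
		unsigned int mask = BIT(offset);
		int ret;

		/* Latch the level first so the pin never drives a stale value... */
		ret = regmap_update_bits(priv->map, BAR_OUTPUT_VAL, mask,
					 value ? mask : 0);
		if (ret)
			return ret;

		/* ...then switch the pad to output, propagating any regmap error. */
		return regmap_update_bits(priv->map, BAR_OUTPUT_EN, mask, mask);
	}
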
diff --git a/drivers/pinctrl/nomadik/Kconfig b/drivers/pinctrl/nomadik/Kconfig
index aafecf348670..1b4fe2a4c302 100644
--- a/drivers/pinctrl/nomadik/Kconfig
+++ b/drivers/pinctrl/nomadik/Kconfig
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-if ARCH_U8500
+if (ARCH_U8500 || COMPILE_TEST)
config PINCTRL_ABX500
bool "ST-Ericsson ABx500 family Mixed Signal Circuit gpio functions"
@@ -10,11 +10,11 @@ config PINCTRL_ABX500
config PINCTRL_AB8500
bool "AB8500 pin controller driver"
- depends on PINCTRL_ABX500 && ARCH_U8500
+ depends on PINCTRL_ABX500 && (ARCH_U8500 || COMPILE_TEST)
config PINCTRL_AB8505
bool "AB8505 pin controller driver"
- depends on PINCTRL_ABX500 && ARCH_U8500
+ depends on PINCTRL_ABX500 && (ARCH_U8500 || COMPILE_TEST)
endif
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 8cd4ba5cf0bd..2f55f83127cf 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -167,14 +167,10 @@ out:
return bit;
}
-static void abx500_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
+static int abx500_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int val)
{
- struct abx500_pinctrl *pct = gpiochip_get_data(chip);
- int ret;
-
- ret = abx500_gpio_set_bits(chip, AB8500_GPIO_OUT1_REG, offset, val);
- if (ret < 0)
- dev_err(pct->dev, "%s write failed (%d)\n", __func__, ret);
+ return abx500_gpio_set_bits(chip, AB8500_GPIO_OUT1_REG, offset, val);
}
static int abx500_gpio_direction_output(struct gpio_chip *chip,
@@ -540,7 +536,7 @@ static const struct gpio_chip abx500gpio_chip = {
.direction_input = abx500_gpio_direction_input,
.get = abx500_gpio_get,
.direction_output = abx500_gpio_direction_output,
- .set = abx500_gpio_set,
+ .set_rv = abx500_gpio_set,
.to_irq = abx500_gpio_to_irq,
.dbg_show = abx500_gpio_dbg_show,
};
diff --git a/drivers/pinctrl/pinconf.h b/drivers/pinctrl/pinconf.h
index a171195b3615..e1ae71610526 100644
--- a/drivers/pinctrl/pinconf.h
+++ b/drivers/pinctrl/pinconf.h
@@ -142,4 +142,21 @@ int pinconf_generic_parse_dt_config(struct device_node *np,
int pinconf_generic_parse_dt_pinmux(struct device_node *np, struct device *dev,
unsigned int **pid, unsigned int **pmux,
unsigned int *npins);
+#else
+static inline int
+pinconf_generic_parse_dt_config(struct device_node *np,
+ struct pinctrl_dev *pctldev,
+ unsigned long **configs,
+ unsigned int *nconfigs)
+{
+ return -ENOTSUPP;
+}
+
+static inline int
+pinconf_generic_parse_dt_pinmux(struct device_node *np, struct device *dev,
+ unsigned int **pid, unsigned int **pmux,
+ unsigned int *npins)
+{
+ return -ENOTSUPP;
+}
#endif
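Note: the new #else branch gives callers of the generic DT parsing helpers compile-time stubs that return -ENOTSUPP when those helpers are configured out. A caller-side sketch under that assumption (example_* names are illustrative; pinconf.h is the internal header patched above, and the non-stub helper is assumed to allocate the configs array for the caller to free):

	#include <linux/errno.h>
	#include <linux/of.h>
	#include <linux/slab.h>

	#include "pinconf.h"	/* drivers/pinctrl internal header patched above */

	static int example_parse_node(struct pinctrl_dev *pctldev,
				      struct device_node *np)
	{
		unsigned long *configs;
		unsigned int nconfigs;
		int ret;

		ret = pinconf_generic_parse_dt_config(np, pctldev, &configs, &nconfigs);
		if (ret == -ENOTSUPP)
			return 0;	/* helpers compiled out: nothing to apply */
		if (ret)
			return ret;

		/* ... apply configs here ... */

		kfree(configs);		/* allocated by the non-stub helper */
		return 0;
	}
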
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 5da8c48695e6..5cf3db6d78b7 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -105,7 +105,8 @@ static int amd_gpio_get_value(struct gpio_chip *gc, unsigned offset)
return !!(pin_reg & BIT(PIN_STS_OFF));
}
-static void amd_gpio_set_value(struct gpio_chip *gc, unsigned offset, int value)
+static int amd_gpio_set_value(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
u32 pin_reg;
unsigned long flags;
@@ -119,6 +120,8 @@ static void amd_gpio_set_value(struct gpio_chip *gc, unsigned offset, int value)
pin_reg &= ~BIT(OUTPUT_VALUE_OFF);
writel(pin_reg, gpio_dev->base + offset * 4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
+
+ return 0;
}
static int amd_gpio_set_debounce(struct amd_gpio *gpio_dev, unsigned int offset,
@@ -1173,7 +1176,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
gpio_dev->gc.direction_input = amd_gpio_direction_input;
gpio_dev->gc.direction_output = amd_gpio_direction_output;
gpio_dev->gc.get = amd_gpio_get_value;
- gpio_dev->gc.set = amd_gpio_set_value;
+ gpio_dev->gc.set_rv = amd_gpio_set_value;
gpio_dev->gc.set_config = amd_gpio_set_config;
gpio_dev->gc.dbg_show = amd_gpio_dbg_show;
diff --git a/drivers/pinctrl/pinctrl-apple-gpio.c b/drivers/pinctrl/pinctrl-apple-gpio.c
index f861e63f4115..0f551d67d482 100644
--- a/drivers/pinctrl/pinctrl-apple-gpio.c
+++ b/drivers/pinctrl/pinctrl-apple-gpio.c
@@ -66,7 +66,7 @@ struct apple_gpio_pinctrl {
#define REG_GPIOx_DRIVE_STRENGTH1 GENMASK(23, 22)
#define REG_IRQ(g, x) (0x800 + 0x40 * (g) + 4 * ((x) >> 5))
-struct regmap_config regmap_config = {
+static const struct regmap_config regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
@@ -79,13 +79,13 @@ struct regmap_config regmap_config = {
/* No locking needed to mask/unmask IRQs as the interrupt mode is per pin-register. */
static void apple_gpio_set_reg(struct apple_gpio_pinctrl *pctl,
- unsigned int pin, u32 mask, u32 value)
+ unsigned int pin, u32 mask, u32 value)
{
regmap_update_bits(pctl->map, REG_GPIO(pin), mask, value);
}
static u32 apple_gpio_get_reg(struct apple_gpio_pinctrl *pctl,
- unsigned int pin)
+ unsigned int pin)
{
int ret;
u32 val;
@@ -100,9 +100,9 @@ static u32 apple_gpio_get_reg(struct apple_gpio_pinctrl *pctl,
/* Pin controller functions */
static int apple_gpio_dt_node_to_map(struct pinctrl_dev *pctldev,
- struct device_node *node,
- struct pinctrl_map **map,
- unsigned *num_maps)
+ struct device_node *node,
+ struct pinctrl_map **map,
+ unsigned *num_maps)
{
unsigned reserved_maps;
struct apple_gpio_pinctrl *pctl;
@@ -147,8 +147,8 @@ static int apple_gpio_dt_node_to_map(struct pinctrl_dev *pctldev,
group_name = pinctrl_generic_get_group_name(pctldev, pin);
function_name = pinmux_generic_get_function_name(pctl->pctldev, func);
ret = pinctrl_utils_add_map_mux(pctl->pctldev, map,
- &reserved_maps, num_maps,
- group_name, function_name);
+ &reserved_maps, num_maps,
+ group_name, function_name);
if (ret)
goto free_map;
}
@@ -171,7 +171,7 @@ static const struct pinctrl_ops apple_gpio_pinctrl_ops = {
/* Pin multiplexer functions */
static int apple_gpio_pinmux_set(struct pinctrl_dev *pctldev, unsigned func,
- unsigned group)
+ unsigned group)
{
struct apple_gpio_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
@@ -237,7 +237,7 @@ static int apple_gpio_direction_input(struct gpio_chip *chip, unsigned int offse
}
static int apple_gpio_direction_output(struct gpio_chip *chip,
- unsigned int offset, int value)
+ unsigned int offset, int value)
{
struct apple_gpio_pinctrl *pctl = gpiochip_get_data(chip);
@@ -282,7 +282,7 @@ static void apple_gpio_irq_mask(struct irq_data *data)
struct apple_gpio_pinctrl *pctl = gpiochip_get_data(gc);
apple_gpio_set_reg(pctl, data->hwirq, REG_GPIOx_MODE,
- FIELD_PREP(REG_GPIOx_MODE, REG_GPIOx_IN_IRQ_OFF));
+ FIELD_PREP(REG_GPIOx_MODE, REG_GPIOx_IN_IRQ_OFF));
gpiochip_disable_irq(gc, data->hwirq);
}
@@ -294,7 +294,7 @@ static void apple_gpio_irq_unmask(struct irq_data *data)
gpiochip_enable_irq(gc, data->hwirq);
apple_gpio_set_reg(pctl, data->hwirq, REG_GPIOx_MODE,
- FIELD_PREP(REG_GPIOx_MODE, irqtype));
+ FIELD_PREP(REG_GPIOx_MODE, irqtype));
}
static unsigned int apple_gpio_irq_startup(struct irq_data *data)
@@ -303,7 +303,7 @@ static unsigned int apple_gpio_irq_startup(struct irq_data *data)
struct apple_gpio_pinctrl *pctl = gpiochip_get_data(chip);
apple_gpio_set_reg(pctl, data->hwirq, REG_GPIOx_GRP,
- FIELD_PREP(REG_GPIOx_GRP, 0));
+ FIELD_PREP(REG_GPIOx_GRP, 0));
apple_gpio_direction_input(chip, data->hwirq);
apple_gpio_irq_unmask(data);
@@ -320,7 +320,7 @@ static int apple_gpio_irq_set_type(struct irq_data *data, unsigned int type)
return -EINVAL;
apple_gpio_set_reg(pctl, data->hwirq, REG_GPIOx_MODE,
- FIELD_PREP(REG_GPIOx_MODE, irqtype));
+ FIELD_PREP(REG_GPIOx_MODE, irqtype));
if (type & IRQ_TYPE_LEVEL_MASK)
irq_set_handler_locked(data, handle_level_irq);
@@ -429,7 +429,7 @@ static int apple_gpio_pinctrl_probe(struct platform_device *pdev)
unsigned int npins;
const char **pin_names;
unsigned int *pin_nums;
- static const char* pinmux_functions[] = {
+ static const char *pinmux_functions[] = {
"gpio", "periph1", "periph2", "periph3"
};
unsigned int i, nirqs = 0;
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index e57ac4ea91dd..ca8a54a43ff5 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -390,7 +390,7 @@ static int atmel_gpio_direction_output(struct gpio_chip *chip,
return 0;
}
-static void atmel_gpio_set(struct gpio_chip *chip, unsigned int offset, int val)
+static int atmel_gpio_set(struct gpio_chip *chip, unsigned int offset, int val)
{
struct atmel_pioctrl *atmel_pioctrl = gpiochip_get_data(chip);
struct atmel_pin *pin = atmel_pioctrl->pins[offset];
@@ -398,10 +398,12 @@ static void atmel_gpio_set(struct gpio_chip *chip, unsigned int offset, int val)
atmel_gpio_write(atmel_pioctrl, pin->bank,
val ? ATMEL_PIO_SODR : ATMEL_PIO_CODR,
BIT(pin->line));
+
+ return 0;
}
-static void atmel_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
- unsigned long *bits)
+static int atmel_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
{
struct atmel_pioctrl *atmel_pioctrl = gpiochip_get_data(chip);
unsigned int bank;
@@ -431,6 +433,8 @@ static void atmel_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
bits[word] >>= ATMEL_PIO_NPINS_PER_BANK;
#endif
}
+
+ return 0;
}
static struct gpio_chip atmel_gpio_chip = {
@@ -438,8 +442,8 @@ static struct gpio_chip atmel_gpio_chip = {
.get = atmel_gpio_get,
.get_multiple = atmel_gpio_get_multiple,
.direction_output = atmel_gpio_direction_output,
- .set = atmel_gpio_set,
- .set_multiple = atmel_gpio_set_multiple,
+ .set_rv = atmel_gpio_set,
+ .set_multiple_rv = atmel_gpio_set_multiple,
.to_irq = atmel_gpio_to_irq,
.base = 0,
};
@@ -609,8 +613,10 @@ static int atmel_pctl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
if (ret)
goto exit;
- pinctrl_utils_add_map_mux(pctldev, map, reserved_maps, num_maps,
+ ret = pinctrl_utils_add_map_mux(pctldev, map, reserved_maps, num_maps,
group, func);
+ if (ret)
+ goto exit;
if (num_configs) {
ret = pinctrl_utils_add_map_configs(pctldev, map,
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 93ab277d9943..6c2727bd55bc 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1449,18 +1449,19 @@ static int at91_gpio_get(struct gpio_chip *chip, unsigned offset)
return (pdsr & mask) != 0;
}
-static void at91_gpio_set(struct gpio_chip *chip, unsigned offset,
- int val)
+static int at91_gpio_set(struct gpio_chip *chip, unsigned int offset, int val)
{
struct at91_gpio_chip *at91_gpio = gpiochip_get_data(chip);
void __iomem *pio = at91_gpio->regbase;
unsigned mask = 1 << offset;
writel_relaxed(mask, pio + (val ? PIO_SODR : PIO_CODR));
+
+ return 0;
}
-static void at91_gpio_set_multiple(struct gpio_chip *chip,
- unsigned long *mask, unsigned long *bits)
+static int at91_gpio_set_multiple(struct gpio_chip *chip,
+ unsigned long *mask, unsigned long *bits)
{
struct at91_gpio_chip *at91_gpio = gpiochip_get_data(chip);
void __iomem *pio = at91_gpio->regbase;
@@ -1472,6 +1473,8 @@ static void at91_gpio_set_multiple(struct gpio_chip *chip,
writel_relaxed(set_mask, pio + PIO_SODR);
writel_relaxed(clear_mask, pio + PIO_CODR);
+
+ return 0;
}
static int at91_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
@@ -1798,8 +1801,8 @@ static const struct gpio_chip at91_gpio_template = {
.direction_input = at91_gpio_direction_input,
.get = at91_gpio_get,
.direction_output = at91_gpio_direction_output,
- .set = at91_gpio_set,
- .set_multiple = at91_gpio_set_multiple,
+ .set_rv = at91_gpio_set,
+ .set_multiple_rv = at91_gpio_set_multiple,
.dbg_show = at91_gpio_dbg_show,
.can_sleep = false,
.ngpio = MAX_NB_GPIO_PER_BANK,
@@ -1819,12 +1822,16 @@ static int at91_gpio_probe(struct platform_device *pdev)
struct at91_gpio_chip *at91_chip = NULL;
struct gpio_chip *chip;
struct pinctrl_gpio_range *range;
+ int alias_idx;
int ret = 0;
int irq, i;
- int alias_idx = of_alias_get_id(np, "gpio");
uint32_t ngpio;
char **names;
+ alias_idx = of_alias_get_id(np, "gpio");
+ if (alias_idx < 0)
+ return alias_idx;
+
BUG_ON(alias_idx >= ARRAY_SIZE(gpio_chips));
if (gpio_chips[alias_idx])
return dev_err_probe(dev, -EBUSY, "%d slot is occupied.\n", alias_idx);
diff --git a/drivers/pinctrl/pinctrl-axp209.c b/drivers/pinctrl/pinctrl-axp209.c
index 2b4805e74eed..fff408b60c4a 100644
--- a/drivers/pinctrl/pinctrl-axp209.c
+++ b/drivers/pinctrl/pinctrl-axp209.c
@@ -192,34 +192,30 @@ static int axp20x_gpio_get_direction(struct gpio_chip *chip,
static int axp20x_gpio_output(struct gpio_chip *chip, unsigned int offset,
int value)
{
- chip->set(chip, offset, value);
-
- return 0;
+ return chip->set_rv(chip, offset, value);
}
-static void axp20x_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int axp20x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct axp20x_pctl *pctl = gpiochip_get_data(chip);
int reg;
/* AXP209 has GPIO3 status sharing the settings register */
- if (offset == 3) {
- regmap_update_bits(pctl->regmap, AXP20X_GPIO3_CTRL,
- AXP20X_GPIO3_FUNCTIONS,
- value ? AXP20X_GPIO3_FUNCTION_OUT_HIGH :
- AXP20X_GPIO3_FUNCTION_OUT_LOW);
- return;
- }
+ if (offset == 3)
+ return regmap_update_bits(pctl->regmap, AXP20X_GPIO3_CTRL,
+ AXP20X_GPIO3_FUNCTIONS,
+ value ?
+ AXP20X_GPIO3_FUNCTION_OUT_HIGH :
+ AXP20X_GPIO3_FUNCTION_OUT_LOW);
reg = axp20x_gpio_get_reg(offset);
if (reg < 0)
- return;
+ return reg;
- regmap_update_bits(pctl->regmap, reg,
- AXP20X_GPIO_FUNCTIONS,
- value ? AXP20X_GPIO_FUNCTION_OUT_HIGH :
- AXP20X_GPIO_FUNCTION_OUT_LOW);
+ return regmap_update_bits(pctl->regmap, reg, AXP20X_GPIO_FUNCTIONS,
+ value ? AXP20X_GPIO_FUNCTION_OUT_HIGH :
+ AXP20X_GPIO_FUNCTION_OUT_LOW);
}
static int axp20x_pmx_set(struct pinctrl_dev *pctldev, unsigned int offset,
@@ -229,12 +225,11 @@ static int axp20x_pmx_set(struct pinctrl_dev *pctldev, unsigned int offset,
int reg;
/* AXP209 GPIO3 settings have a different layout */
- if (offset == 3) {
+ if (offset == 3)
return regmap_update_bits(pctl->regmap, AXP20X_GPIO3_CTRL,
AXP20X_GPIO3_FUNCTIONS,
config == AXP20X_MUX_GPIO_OUT ? AXP20X_GPIO3_FUNCTION_OUT_LOW :
AXP20X_GPIO3_FUNCTION_INPUT);
- }
reg = axp20x_gpio_get_reg(offset);
if (reg < 0)
@@ -468,7 +463,7 @@ static int axp20x_pctl_probe(struct platform_device *pdev)
pctl->chip.owner = THIS_MODULE;
pctl->chip.get = axp20x_gpio_get;
pctl->chip.get_direction = axp20x_gpio_get_direction;
- pctl->chip.set = axp20x_gpio_set;
+ pctl->chip.set_rv = axp20x_gpio_set;
pctl->chip.direction_input = pinctrl_gpio_direction_input;
pctl->chip.direction_output = axp20x_gpio_output;
diff --git a/drivers/pinctrl/pinctrl-cy8c95x0.c b/drivers/pinctrl/pinctrl-cy8c95x0.c
index 3cfbcaee9e65..8a2fd632bdd4 100644
--- a/drivers/pinctrl/pinctrl-cy8c95x0.c
+++ b/drivers/pinctrl/pinctrl-cy8c95x0.c
@@ -742,14 +742,15 @@ static int cy8c95x0_gpio_get_value(struct gpio_chip *gc, unsigned int off)
return reg_val ? 1 : 0;
}
-static void cy8c95x0_gpio_set_value(struct gpio_chip *gc, unsigned int off,
- int val)
+static int cy8c95x0_gpio_set_value(struct gpio_chip *gc, unsigned int off,
+ int val)
{
struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
u8 port = cypress_get_port(chip, off);
u8 bit = cypress_get_pin_mask(chip, off);
- cy8c95x0_regmap_write_bits(chip, CY8C95X0_OUTPUT, port, bit, val ? bit : 0);
+ return cy8c95x0_regmap_write_bits(chip, CY8C95X0_OUTPUT, port, bit,
+ val ? bit : 0);
}
static int cy8c95x0_gpio_get_direction(struct gpio_chip *gc, unsigned int off)
@@ -908,12 +909,12 @@ static int cy8c95x0_gpio_get_multiple(struct gpio_chip *gc,
return cy8c95x0_read_regs_mask(chip, CY8C95X0_INPUT, bits, mask);
}
-static void cy8c95x0_gpio_set_multiple(struct gpio_chip *gc,
- unsigned long *mask, unsigned long *bits)
+static int cy8c95x0_gpio_set_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
{
struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
- cy8c95x0_write_regs_mask(chip, CY8C95X0_OUTPUT, bits, mask);
+ return cy8c95x0_write_regs_mask(chip, CY8C95X0_OUTPUT, bits, mask);
}
static int cy8c95x0_add_pin_ranges(struct gpio_chip *gc)
@@ -938,10 +939,10 @@ static int cy8c95x0_setup_gpiochip(struct cy8c95x0_pinctrl *chip)
gc->direction_input = cy8c95x0_gpio_direction_input;
gc->direction_output = cy8c95x0_gpio_direction_output;
gc->get = cy8c95x0_gpio_get_value;
- gc->set = cy8c95x0_gpio_set_value;
+ gc->set_rv = cy8c95x0_gpio_set_value;
gc->get_direction = cy8c95x0_gpio_get_direction;
gc->get_multiple = cy8c95x0_gpio_get_multiple;
- gc->set_multiple = cy8c95x0_gpio_set_multiple;
+ gc->set_multiple_rv = cy8c95x0_gpio_set_multiple;
gc->set_config = gpiochip_generic_config;
gc->can_sleep = true;
gc->add_pin_ranges = cy8c95x0_add_pin_ranges;
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index a9e48eac15f6..3c660471ec69 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -3800,12 +3800,14 @@ static void ingenic_gpio_irq_handler(struct irq_desc *desc)
chained_irq_exit(irq_chip, desc);
}
-static void ingenic_gpio_set(struct gpio_chip *gc,
- unsigned int offset, int value)
+static int ingenic_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
ingenic_gpio_set_value(jzgc, offset, value);
+
+ return 0;
}
static int ingenic_gpio_get(struct gpio_chip *gc, unsigned int offset)
@@ -4449,7 +4451,7 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
jzgc->gc.fwnode = fwnode;
jzgc->gc.owner = THIS_MODULE;
- jzgc->gc.set = ingenic_gpio_set;
+ jzgc->gc.set_rv = ingenic_gpio_set;
jzgc->gc.get = ingenic_gpio_get;
jzgc->gc.direction_input = pinctrl_gpio_direction_input;
jzgc->gc.direction_output = ingenic_gpio_direction_output;
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index 4d1f41488017..c2f4b16f42d2 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -636,6 +636,14 @@ int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
mcp->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ /*
+ * Reset the chip - we don't really know what state it's in, so reset
+ * all pins to input first to prevent surprises.
+ */
+ ret = mcp_write(mcp, MCP_IODIR, mcp->chip.ngpio == 16 ? 0xFFFF : 0xFF);
+ if (ret < 0)
+ return ret;
+
/* verify MCP_IOCON.SEQOP = 0, so sequential reads work,
* and MCP_IOCON.HAEN = 1, so we work with all chips.
*/
diff --git a/drivers/pinctrl/pinctrl-microchip-sgpio.c b/drivers/pinctrl/pinctrl-microchip-sgpio.c
index a60db93b61b1..88c2f14cfc6b 100644
--- a/drivers/pinctrl/pinctrl-microchip-sgpio.c
+++ b/drivers/pinctrl/pinctrl-microchip-sgpio.c
@@ -555,10 +555,10 @@ static int microchip_sgpio_get_direction(struct gpio_chip *gc, unsigned int gpio
return bank->is_input ? GPIO_LINE_DIRECTION_IN : GPIO_LINE_DIRECTION_OUT;
}
-static void microchip_sgpio_set_value(struct gpio_chip *gc,
- unsigned int gpio, int value)
+static int microchip_sgpio_set_value(struct gpio_chip *gc, unsigned int gpio,
+ int value)
{
- microchip_sgpio_direction_output(gc, gpio, value);
+ return microchip_sgpio_direction_output(gc, gpio, value);
}
static int microchip_sgpio_get_value(struct gpio_chip *gc, unsigned int gpio)
@@ -858,7 +858,7 @@ static int microchip_sgpio_register_bank(struct device *dev,
gc->direction_input = microchip_sgpio_direction_input;
gc->direction_output = microchip_sgpio_direction_output;
gc->get = microchip_sgpio_get_value;
- gc->set = microchip_sgpio_set_value;
+ gc->set_rv = microchip_sgpio_set_value;
gc->request = gpiochip_generic_request;
gc->free = gpiochip_generic_free;
gc->of_xlate = microchip_sgpio_of_xlate;
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index 329d54b11529..fbb3d43746bb 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -1950,17 +1950,18 @@ static int ocelot_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(val & BIT(offset % 32));
}
-static void ocelot_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int ocelot_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct ocelot_pinctrl *info = gpiochip_get_data(chip);
if (value)
- regmap_write(info->map, REG(OCELOT_GPIO_OUT_SET, info, offset),
- BIT(offset % 32));
- else
- regmap_write(info->map, REG(OCELOT_GPIO_OUT_CLR, info, offset),
- BIT(offset % 32));
+ return regmap_write(info->map,
+ REG(OCELOT_GPIO_OUT_SET, info, offset),
+ BIT(offset % 32));
+
+ return regmap_write(info->map, REG(OCELOT_GPIO_OUT_CLR, info, offset),
+ BIT(offset % 32));
}
static int ocelot_gpio_get_direction(struct gpio_chip *chip,
@@ -1996,7 +1997,7 @@ static int ocelot_gpio_direction_output(struct gpio_chip *chip,
static const struct gpio_chip ocelot_gpiolib_chip = {
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
- .set = ocelot_gpio_set,
+ .set_rv = ocelot_gpio_set,
.get = ocelot_gpio_get,
.get_direction = ocelot_gpio_get_direction,
.direction_input = pinctrl_gpio_direction_input,
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 8c50e0091b32..e7bf60960961 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -1186,12 +1186,14 @@ static int pistachio_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(gpio_readl(bank, reg) & BIT(offset));
}
-static void pistachio_gpio_set(struct gpio_chip *chip, unsigned offset,
- int value)
+static int pistachio_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct pistachio_gpio_bank *bank = gpiochip_get_data(chip);
gpio_mask_writel(bank, GPIO_OUTPUT, offset, !!value);
+
+ return 0;
}
static int pistachio_gpio_direction_input(struct gpio_chip *chip,
@@ -1326,7 +1328,7 @@ static void pistachio_gpio_irq_handler(struct irq_desc *desc)
.direction_input = pistachio_gpio_direction_input, \
.direction_output = pistachio_gpio_direction_output, \
.get = pistachio_gpio_get, \
- .set = pistachio_gpio_set, \
+ .set_rv = pistachio_gpio_set, \
.base = _pin_base, \
.ngpio = _npins, \
}, \
diff --git a/drivers/pinctrl/pinctrl-rk805.c b/drivers/pinctrl/pinctrl-rk805.c
index c42f1bf93404..fc0e330b1d11 100644
--- a/drivers/pinctrl/pinctrl-rk805.c
+++ b/drivers/pinctrl/pinctrl-rk805.c
@@ -325,26 +325,26 @@ static int rk805_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(val & pci->pin_cfg[offset].val_msk);
}
-static void rk805_gpio_set(struct gpio_chip *chip,
- unsigned int offset,
- int value)
+static int rk805_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct rk805_pctrl_info *pci = gpiochip_get_data(chip);
- int ret;
- ret = regmap_update_bits(pci->rk808->regmap,
- pci->pin_cfg[offset].reg,
- pci->pin_cfg[offset].val_msk,
- value ? pci->pin_cfg[offset].val_msk : 0);
- if (ret)
- dev_err(pci->dev, "set gpio%d value %d failed\n",
- offset, value);
+ return regmap_update_bits(pci->rk808->regmap,
+ pci->pin_cfg[offset].reg,
+ pci->pin_cfg[offset].val_msk,
+ value ? pci->pin_cfg[offset].val_msk : 0);
}
static int rk805_gpio_direction_output(struct gpio_chip *chip,
unsigned int offset, int value)
{
- rk805_gpio_set(chip, offset, value);
+ int ret;
+
+ ret = rk805_gpio_set(chip, offset, value);
+ if (ret)
+ return ret;
+
return pinctrl_gpio_direction_output(chip, offset);
}
@@ -378,7 +378,7 @@ static const struct gpio_chip rk805_gpio_chip = {
.free = gpiochip_generic_free,
.get_direction = rk805_gpio_get_direction,
.get = rk805_gpio_get,
- .set = rk805_gpio_set,
+ .set_rv = rk805_gpio_set,
.direction_input = pinctrl_gpio_direction_input,
.direction_output = rk805_gpio_direction_output,
.can_sleep = true,
diff --git a/drivers/pinctrl/pinctrl-scmi.c b/drivers/pinctrl/pinctrl-scmi.c
index df4bbcd7d1d5..383681041e4c 100644
--- a/drivers/pinctrl/pinctrl-scmi.c
+++ b/drivers/pinctrl/pinctrl-scmi.c
@@ -507,6 +507,7 @@ static int pinctrl_scmi_get_pins(struct scmi_pinctrl *pmx,
static const char * const scmi_pinctrl_blocklist[] = {
"fsl,imx95",
+ "fsl,imx94",
NULL
};
diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c
index aae01120dc52..f4fdcaa043e6 100644
--- a/drivers/pinctrl/pinctrl-stmfx.c
+++ b/drivers/pinctrl/pinctrl-stmfx.c
@@ -115,14 +115,14 @@ static int stmfx_gpio_get(struct gpio_chip *gc, unsigned int offset)
return ret ? ret : !!(value & mask);
}
-static void stmfx_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+static int stmfx_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct stmfx_pinctrl *pctl = gpiochip_get_data(gc);
u32 reg = value ? STMFX_REG_GPO_SET : STMFX_REG_GPO_CLR;
u32 mask = get_mask(offset);
- regmap_write_bits(pctl->stmfx->map, reg + get_reg(offset),
- mask, mask);
+ return regmap_write_bits(pctl->stmfx->map, reg + get_reg(offset),
+ mask, mask);
}
static int stmfx_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
@@ -161,8 +161,11 @@ static int stmfx_gpio_direction_output(struct gpio_chip *gc,
struct stmfx_pinctrl *pctl = gpiochip_get_data(gc);
u32 reg = STMFX_REG_GPIO_DIR + get_reg(offset);
u32 mask = get_mask(offset);
+ int ret;
- stmfx_gpio_set(gc, offset, value);
+ ret = stmfx_gpio_set(gc, offset, value);
+ if (ret)
+ return ret;
return regmap_write_bits(pctl->stmfx->map, reg, mask, mask);
}
@@ -694,7 +697,7 @@ static int stmfx_pinctrl_probe(struct platform_device *pdev)
pctl->gpio_chip.direction_input = stmfx_gpio_direction_input;
pctl->gpio_chip.direction_output = stmfx_gpio_direction_output;
pctl->gpio_chip.get = stmfx_gpio_get;
- pctl->gpio_chip.set = stmfx_gpio_set;
+ pctl->gpio_chip.set_rv = stmfx_gpio_set;
pctl->gpio_chip.set_config = gpiochip_generic_config;
pctl->gpio_chip.base = -1;
pctl->gpio_chip.ngpio = pctl->pctl_desc.npins;
diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c
index 98262b8ce43a..d3a12c1c0de2 100644
--- a/drivers/pinctrl/pinctrl-sx150x.c
+++ b/drivers/pinctrl/pinctrl-sx150x.c
@@ -432,24 +432,25 @@ static int sx150x_gpio_oscio_set(struct sx150x_pinctrl *pctl,
(value ? 0x1f : 0x10));
}
-static void sx150x_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int sx150x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct sx150x_pinctrl *pctl = gpiochip_get_data(chip);
if (sx150x_pin_is_oscio(pctl, offset))
- sx150x_gpio_oscio_set(pctl, value);
- else
- __sx150x_gpio_set(pctl, offset, value);
+ return sx150x_gpio_oscio_set(pctl, value);
+
+ return __sx150x_gpio_set(pctl, offset, value);
}
-static void sx150x_gpio_set_multiple(struct gpio_chip *chip,
- unsigned long *mask,
- unsigned long *bits)
+static int sx150x_gpio_set_multiple(struct gpio_chip *chip,
+ unsigned long *mask,
+ unsigned long *bits)
{
struct sx150x_pinctrl *pctl = gpiochip_get_data(chip);
- regmap_write_bits(pctl->regmap, pctl->data->reg_data, *mask, *bits);
+ return regmap_write_bits(pctl->regmap, pctl->data->reg_data, *mask,
+ *bits);
}
static int sx150x_gpio_direction_input(struct gpio_chip *chip,
@@ -1175,7 +1176,7 @@ static int sx150x_probe(struct i2c_client *client)
pctl->gpio.direction_input = sx150x_gpio_direction_input;
pctl->gpio.direction_output = sx150x_gpio_direction_output;
pctl->gpio.get = sx150x_gpio_get;
- pctl->gpio.set = sx150x_gpio_set;
+ pctl->gpio.set_rv = sx150x_gpio_set;
pctl->gpio.set_config = gpiochip_generic_config;
pctl->gpio.parent = dev;
pctl->gpio.can_sleep = true;
@@ -1190,7 +1191,7 @@ static int sx150x_probe(struct i2c_client *client)
* would require locking that is not in place at this time.
*/
if (pctl->data->model != SX150X_789)
- pctl->gpio.set_multiple = sx150x_gpio_set_multiple;
+ pctl->gpio.set_multiple_rv = sx150x_gpio_set_multiple;
/* Add Interrupt support if an irq is specified */
if (client->irq > 0) {
diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
index 7366aba5a199..57fefeb603f0 100644
--- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
@@ -327,14 +327,14 @@ static int lpi_gpio_get(struct gpio_chip *chip, unsigned int pin)
LPI_GPIO_VALUE_IN_MASK;
}
-static void lpi_gpio_set(struct gpio_chip *chip, unsigned int pin, int value)
+static int lpi_gpio_set(struct gpio_chip *chip, unsigned int pin, int value)
{
struct lpi_pinctrl *state = gpiochip_get_data(chip);
unsigned long config;
config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, value);
- lpi_config_set(state->ctrl, pin, &config, 1);
+ return lpi_config_set(state->ctrl, pin, &config, 1);
}
#ifdef CONFIG_DEBUG_FS
@@ -398,7 +398,7 @@ static const struct gpio_chip lpi_gpio_template = {
.direction_input = lpi_gpio_direction_input,
.direction_output = lpi_gpio_direction_output,
.get = lpi_gpio_get,
- .set = lpi_gpio_set,
+ .set_rv = lpi_gpio_set,
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
.dbg_show = lpi_gpio_dbg_show,
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 0eb816395dc6..f012ea88aa22 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -635,7 +635,7 @@ static int msm_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(val & BIT(g->in_bit));
}
-static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int msm_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
const struct msm_pingroup *g;
struct msm_pinctrl *pctrl = gpiochip_get_data(chip);
@@ -654,6 +654,8 @@ static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
msm_writel_io(val, pctrl, g);
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return 0;
}
#ifdef CONFIG_DEBUG_FS
@@ -790,7 +792,7 @@ static const struct gpio_chip msm_gpio_template = {
.direction_output = msm_gpio_direction_output,
.get_direction = msm_gpio_get_direction,
.get = msm_gpio_get,
- .set = msm_gpio_set,
+ .set_rv = msm_gpio_set,
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
.dbg_show = msm_gpio_dbg_show,
diff --git a/drivers/pinctrl/qcom/pinctrl-qcm2290.c b/drivers/pinctrl/qcom/pinctrl-qcm2290.c
index ba699eac9ee8..f885af571ec9 100644
--- a/drivers/pinctrl/qcom/pinctrl-qcm2290.c
+++ b/drivers/pinctrl/qcom/pinctrl-qcm2290.c
@@ -37,6 +37,8 @@
.mux_bit = 2, \
.pull_bit = 0, \
.drv_bit = 6, \
+ .egpio_enable = 12, \
+ .egpio_present = 11, \
.oe_bit = 9, \
.in_bit = 0, \
.out_bit = 1, \
@@ -387,6 +389,7 @@ enum qcm2290_functions {
msm_mux_ddr_pxi1,
msm_mux_ddr_pxi2,
msm_mux_ddr_pxi3,
+ msm_mux_egpio,
msm_mux_gcc_gp1,
msm_mux_gcc_gp2,
msm_mux_gcc_gp3,
@@ -816,6 +819,13 @@ static const char * const sd_write_groups[] = {
static const char * const jitter_bist_groups[] = {
"gpio96", "gpio97",
};
+static const char * const egpio_groups[] = {
+ "gpio98", "gpio99", "gpio100", "gpio101", "gpio102", "gpio103",
+ "gpio104", "gpio105", "gpio106", "gpio107", "gpio108", "gpio109",
+ "gpio110", "gpio111", "gpio112", "gpio113", "gpio114", "gpio115",
+ "gpio116", "gpio117", "gpio118", "gpio119", "gpio120", "gpio121",
+ "gpio122", "gpio123", "gpio124", "gpio125", "gpio126",
+};
static const char * const ddr_pxi2_groups[] = {
"gpio102", "gpio103",
};
@@ -851,6 +861,7 @@ static const struct pinfunction qcm2290_functions[] = {
MSM_PIN_FUNCTION(ddr_pxi1),
MSM_PIN_FUNCTION(ddr_pxi2),
MSM_PIN_FUNCTION(ddr_pxi3),
+ MSM_PIN_FUNCTION(egpio),
MSM_PIN_FUNCTION(gcc_gp1),
MSM_PIN_FUNCTION(gcc_gp2),
MSM_PIN_FUNCTION(gcc_gp3),
@@ -1037,35 +1048,35 @@ static const struct msm_pingroup qcm2290_groups[] = {
[95] = PINGROUP(95, nav_gpio, gp_pdm0, qdss_gpio, wlan1_adc1, _, _, _, _, _),
[96] = PINGROUP(96, qup4, nav_gpio, mdp_vsync, gp_pdm1, sd_write, jitter_bist, qdss_cti, qdss_cti, _),
[97] = PINGROUP(97, qup4, nav_gpio, mdp_vsync, gp_pdm2, jitter_bist, qdss_cti, qdss_cti, _, _),
- [98] = PINGROUP(98, _, _, _, _, _, _, _, _, _),
- [99] = PINGROUP(99, _, _, _, _, _, _, _, _, _),
- [100] = PINGROUP(100, atest, _, _, _, _, _, _, _, _),
- [101] = PINGROUP(101, atest, _, _, _, _, _, _, _, _),
- [102] = PINGROUP(102, _, phase_flag, dac_calib, ddr_pxi2, _, _, _, _, _),
- [103] = PINGROUP(103, _, phase_flag, dac_calib, ddr_pxi2, _, _, _, _, _),
- [104] = PINGROUP(104, _, phase_flag, qdss_gpio, dac_calib, ddr_pxi3, _, pwm_8, _, _),
- [105] = PINGROUP(105, _, phase_flag, qdss_gpio, dac_calib, ddr_pxi3, _, _, _, _),
- [106] = PINGROUP(106, nav_gpio, gcc_gp3, qdss_gpio, _, _, _, _, _, _),
- [107] = PINGROUP(107, nav_gpio, gcc_gp2, qdss_gpio, _, _, _, _, _, _),
- [108] = PINGROUP(108, nav_gpio, _, _, _, _, _, _, _, _),
- [109] = PINGROUP(109, _, qdss_gpio, _, _, _, _, _, _, _),
- [110] = PINGROUP(110, _, qdss_gpio, _, _, _, _, _, _, _),
- [111] = PINGROUP(111, _, _, _, _, _, _, _, _, _),
- [112] = PINGROUP(112, _, _, _, _, _, _, _, _, _),
- [113] = PINGROUP(113, _, _, _, _, _, _, _, _, _),
- [114] = PINGROUP(114, _, _, _, _, _, _, _, _, _),
- [115] = PINGROUP(115, _, pwm_9, _, _, _, _, _, _, _),
- [116] = PINGROUP(116, _, _, _, _, _, _, _, _, _),
- [117] = PINGROUP(117, _, _, _, _, _, _, _, _, _),
- [118] = PINGROUP(118, _, _, _, _, _, _, _, _, _),
- [119] = PINGROUP(119, _, _, _, _, _, _, _, _, _),
- [120] = PINGROUP(120, _, _, _, _, _, _, _, _, _),
- [121] = PINGROUP(121, _, _, _, _, _, _, _, _, _),
- [122] = PINGROUP(122, _, _, _, _, _, _, _, _, _),
- [123] = PINGROUP(123, _, _, _, _, _, _, _, _, _),
- [124] = PINGROUP(124, _, _, _, _, _, _, _, _, _),
- [125] = PINGROUP(125, _, _, _, _, _, _, _, _, _),
- [126] = PINGROUP(126, _, _, _, _, _, _, _, _, _),
+ [98] = PINGROUP(98, _, _, _, _, _, _, _, _, egpio),
+ [99] = PINGROUP(99, _, _, _, _, _, _, _, _, egpio),
+ [100] = PINGROUP(100, atest, _, _, _, _, _, _, _, egpio),
+ [101] = PINGROUP(101, atest, _, _, _, _, _, _, _, egpio),
+ [102] = PINGROUP(102, _, phase_flag, dac_calib, ddr_pxi2, _, _, _, _, egpio),
+ [103] = PINGROUP(103, _, phase_flag, dac_calib, ddr_pxi2, _, _, _, _, egpio),
+ [104] = PINGROUP(104, _, phase_flag, qdss_gpio, dac_calib, ddr_pxi3, _, pwm_8, _, egpio),
+ [105] = PINGROUP(105, _, phase_flag, qdss_gpio, dac_calib, ddr_pxi3, _, _, _, egpio),
+ [106] = PINGROUP(106, nav_gpio, gcc_gp3, qdss_gpio, _, _, _, _, _, egpio),
+ [107] = PINGROUP(107, nav_gpio, gcc_gp2, qdss_gpio, _, _, _, _, _, egpio),
+ [108] = PINGROUP(108, nav_gpio, _, _, _, _, _, _, _, egpio),
+ [109] = PINGROUP(109, _, qdss_gpio, _, _, _, _, _, _, egpio),
+ [110] = PINGROUP(110, _, qdss_gpio, _, _, _, _, _, _, egpio),
+ [111] = PINGROUP(111, _, _, _, _, _, _, _, _, egpio),
+ [112] = PINGROUP(112, _, _, _, _, _, _, _, _, egpio),
+ [113] = PINGROUP(113, _, _, _, _, _, _, _, _, egpio),
+ [114] = PINGROUP(114, _, _, _, _, _, _, _, _, egpio),
+ [115] = PINGROUP(115, _, pwm_9, _, _, _, _, _, _, egpio),
+ [116] = PINGROUP(116, _, _, _, _, _, _, _, _, egpio),
+ [117] = PINGROUP(117, _, _, _, _, _, _, _, _, egpio),
+ [118] = PINGROUP(118, _, _, _, _, _, _, _, _, egpio),
+ [119] = PINGROUP(119, _, _, _, _, _, _, _, _, egpio),
+ [120] = PINGROUP(120, _, _, _, _, _, _, _, _, egpio),
+ [121] = PINGROUP(121, _, _, _, _, _, _, _, _, egpio),
+ [122] = PINGROUP(122, _, _, _, _, _, _, _, _, egpio),
+ [123] = PINGROUP(123, _, _, _, _, _, _, _, _, egpio),
+ [124] = PINGROUP(124, _, _, _, _, _, _, _, _, egpio),
+ [125] = PINGROUP(125, _, _, _, _, _, _, _, _, egpio),
+ [126] = PINGROUP(126, _, _, _, _, _, _, _, _, egpio),
[127] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x84004, 0, 0),
[128] = SDC_QDSD_PINGROUP(sdc1_clk, 0x84000, 13, 6),
[129] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x84000, 11, 3),
@@ -1095,6 +1106,7 @@ static const struct msm_pinctrl_soc_data qcm2290_pinctrl = {
.ngpios = 127,
.wakeirq_map = qcm2290_mpm_map,
.nwakeirq_map = ARRAY_SIZE(qcm2290_mpm_map),
+ .egpio_func = 9,
};
static int qcm2290_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/qcom/pinctrl-qcs615.c b/drivers/pinctrl/qcom/pinctrl-qcs615.c
index 23015b055f6a..17ca743c2210 100644
--- a/drivers/pinctrl/qcom/pinctrl-qcs615.c
+++ b/drivers/pinctrl/qcom/pinctrl-qcs615.c
@@ -1062,7 +1062,7 @@ static const struct msm_pinctrl_soc_data qcs615_tlmm = {
.nfunctions = ARRAY_SIZE(qcs615_functions),
.groups = qcs615_groups,
.ngroups = ARRAY_SIZE(qcs615_groups),
- .ngpios = 123,
+ .ngpios = 124,
.tiles = qcs615_tiles,
.ntiles = ARRAY_SIZE(qcs615_tiles),
.wakeirq_map = qcs615_pdc_map,
diff --git a/drivers/pinctrl/qcom/pinctrl-qcs8300.c b/drivers/pinctrl/qcom/pinctrl-qcs8300.c
index ba6de944a859..5f5f7c4ac644 100644
--- a/drivers/pinctrl/qcom/pinctrl-qcs8300.c
+++ b/drivers/pinctrl/qcom/pinctrl-qcs8300.c
@@ -1204,7 +1204,7 @@ static const struct msm_pinctrl_soc_data qcs8300_pinctrl = {
.nfunctions = ARRAY_SIZE(qcs8300_functions),
.groups = qcs8300_groups,
.ngroups = ARRAY_SIZE(qcs8300_groups),
- .ngpios = 133,
+ .ngpios = 134,
.wakeirq_map = qcs8300_pdc_map,
.nwakeirq_map = ARRAY_SIZE(qcs8300_pdc_map),
.egpio_func = 11,
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index c8ce61066070..bc082bfb52ef 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -764,14 +764,14 @@ static int pmic_gpio_get(struct gpio_chip *chip, unsigned pin)
return !!pad->out_value;
}
-static void pmic_gpio_set(struct gpio_chip *chip, unsigned pin, int value)
+static int pmic_gpio_set(struct gpio_chip *chip, unsigned int pin, int value)
{
struct pmic_gpio_state *state = gpiochip_get_data(chip);
unsigned long config;
config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, value);
- pmic_gpio_config_set(state->ctrl, pin, &config, 1);
+ return pmic_gpio_config_set(state->ctrl, pin, &config, 1);
}
static int pmic_gpio_of_xlate(struct gpio_chip *chip,
@@ -802,7 +802,7 @@ static const struct gpio_chip pmic_gpio_gpio_template = {
.direction_input = pmic_gpio_direction_input,
.direction_output = pmic_gpio_direction_output,
.get = pmic_gpio_get,
- .set = pmic_gpio_set,
+ .set_rv = pmic_gpio_set,
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
.of_xlate = pmic_gpio_of_xlate,
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
index 7b28c5fb2402..ba9084978f90 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
@@ -600,14 +600,14 @@ static int pmic_mpp_get(struct gpio_chip *chip, unsigned pin)
return !!pad->out_value;
}
-static void pmic_mpp_set(struct gpio_chip *chip, unsigned pin, int value)
+static int pmic_mpp_set(struct gpio_chip *chip, unsigned int pin, int value)
{
struct pmic_mpp_state *state = gpiochip_get_data(chip);
unsigned long config;
config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, value);
- pmic_mpp_config_set(state->ctrl, pin, &config, 1);
+ return pmic_mpp_config_set(state->ctrl, pin, &config, 1);
}
static int pmic_mpp_of_xlate(struct gpio_chip *chip,
@@ -638,7 +638,7 @@ static const struct gpio_chip pmic_mpp_gpio_template = {
.direction_input = pmic_mpp_direction_input,
.direction_output = pmic_mpp_direction_output,
.get = pmic_mpp_get,
- .set = pmic_mpp_set,
+ .set_rv = pmic_mpp_set,
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
.of_xlate = pmic_mpp_of_xlate,
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
index 82679417e25f..3a8014ebf064 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -507,7 +507,8 @@ static int pm8xxx_gpio_get(struct gpio_chip *chip, unsigned offset)
return ret;
}
-static void pm8xxx_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int pm8xxx_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct pm8xxx_gpio *pctrl = gpiochip_get_data(chip);
struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
@@ -519,7 +520,7 @@ static void pm8xxx_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
val |= pin->open_drain << 1;
val |= pin->output_value;
- pm8xxx_write_bank(pctrl, pin, 1, val);
+ return pm8xxx_write_bank(pctrl, pin, 1, val);
}
static int pm8xxx_gpio_of_xlate(struct gpio_chip *chip,
@@ -596,7 +597,7 @@ static const struct gpio_chip pm8xxx_gpio_template = {
.direction_input = pm8xxx_gpio_direction_input,
.direction_output = pm8xxx_gpio_direction_output,
.get = pm8xxx_gpio_get,
- .set = pm8xxx_gpio_set,
+ .set_rv = pm8xxx_gpio_set,
.of_xlate = pm8xxx_gpio_of_xlate,
.dbg_show = pm8xxx_gpio_dbg_show,
.owner = THIS_MODULE,
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
index 4841bbfe4864..087c37d304fc 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
@@ -511,14 +511,15 @@ static int pm8xxx_mpp_get(struct gpio_chip *chip, unsigned offset)
return ret;
}
-static void pm8xxx_mpp_set(struct gpio_chip *chip, unsigned offset, int value)
+static int pm8xxx_mpp_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct pm8xxx_mpp *pctrl = gpiochip_get_data(chip);
struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
pin->output_value = !!value;
- pm8xxx_mpp_update(pctrl, pin);
+ return pm8xxx_mpp_update(pctrl, pin);
}
static int pm8xxx_mpp_of_xlate(struct gpio_chip *chip,
@@ -633,7 +634,7 @@ static const struct gpio_chip pm8xxx_mpp_template = {
.direction_input = pm8xxx_mpp_direction_input,
.direction_output = pm8xxx_mpp_direction_output,
.get = pm8xxx_mpp_get,
- .set = pm8xxx_mpp_set,
+ .set_rv = pm8xxx_mpp_set,
.of_xlate = pm8xxx_mpp_of_xlate,
.dbg_show = pm8xxx_mpp_dbg_show,
.owner = THIS_MODULE,
diff --git a/drivers/pinctrl/qcom/tlmm-test.c b/drivers/pinctrl/qcom/tlmm-test.c
index fd02bf3a76cb..7b99e89e0f67 100644
--- a/drivers/pinctrl/qcom/tlmm-test.c
+++ b/drivers/pinctrl/qcom/tlmm-test.c
@@ -547,6 +547,7 @@ static int tlmm_test_init(struct kunit *test)
struct tlmm_test_priv *priv;
priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
atomic_set(&priv->intr_count, 0);
atomic_set(&priv->thread_count, 0);
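Note: the tlmm-test hunk adds the missing allocation check; kunit_kzalloc() can fail, and asserting on its result stops the test case before the NULL pointer is used below. A minimal KUnit init sketch of the same pattern (example_* names are illustrative):

	#include <kunit/test.h>
	#include <linux/atomic.h>

	struct example_priv {
		atomic_t intr_count;
	};

	static int example_test_init(struct kunit *test)
	{
		struct example_priv *priv;

		priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
		/* Abort the test case cleanly on allocation failure. */
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);

		atomic_set(&priv->intr_count, 0);
		test->priv = priv;

		return 0;
	}
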
diff --git a/drivers/pinctrl/renesas/Kconfig b/drivers/pinctrl/renesas/Kconfig
index 3c18d908b21e..e16034fc1bbf 100644
--- a/drivers/pinctrl/renesas/Kconfig
+++ b/drivers/pinctrl/renesas/Kconfig
@@ -42,6 +42,7 @@ config PINCTRL_RENESAS
select PINCTRL_RZG2L if ARCH_RZG2L
select PINCTRL_RZV2M if ARCH_R9A09G011
select PINCTRL_RZG2L if ARCH_R9A09G047
+ select PINCTRL_RZG2L if ARCH_R9A09G056
select PINCTRL_RZG2L if ARCH_R9A09G057
select PINCTRL_PFC_SH7203 if CPU_SUBTYPE_SH7203
select PINCTRL_PFC_SH7264 if CPU_SUBTYPE_SH7264
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index c72e250f4a15..78fa08ff0faa 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -2230,135 +2230,146 @@ static const struct rzg2l_dedicated_configs rzg3s_dedicated_pins[] = {
PIN_CFG_IO_VMC_SD1)) },
};
-static struct rzg2l_dedicated_configs rzv2h_dedicated_pins[] = {
- { "NMI", RZG2L_SINGLE_PIN_PACK(0x1, 0, PIN_CFG_NF) },
- { "TMS_SWDIO", RZG2L_SINGLE_PIN_PACK(0x3, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_IEN)) },
- { "TDO", RZG2L_SINGLE_PIN_PACK(0x3, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
- { "WDTUDFCA", RZG2L_SINGLE_PIN_PACK(0x5, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD | PIN_CFG_NOD)) },
- { "WDTUDFCM", RZG2L_SINGLE_PIN_PACK(0x5, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD | PIN_CFG_NOD)) },
- { "SCIF_RXD", RZG2L_SINGLE_PIN_PACK(0x6, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "SCIF_TXD", RZG2L_SINGLE_PIN_PACK(0x6, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "XSPI0_CKP", RZG2L_SINGLE_PIN_PACK(0x7, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD | PIN_CFG_OEN)) },
- { "XSPI0_CKN", RZG2L_SINGLE_PIN_PACK(0x7, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD | PIN_CFG_OEN)) },
- { "XSPI0_CS0N", RZG2L_SINGLE_PIN_PACK(0x7, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD | PIN_CFG_OEN)) },
- { "XSPI0_DS", RZG2L_SINGLE_PIN_PACK(0x7, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "XSPI0_RESET0N", RZG2L_SINGLE_PIN_PACK(0x7, 4, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD | PIN_CFG_OEN)) },
- { "XSPI0_RSTO0N", RZG2L_SINGLE_PIN_PACK(0x7, 5, (PIN_CFG_PUPD)) },
- { "XSPI0_INT0N", RZG2L_SINGLE_PIN_PACK(0x7, 6, (PIN_CFG_PUPD)) },
- { "XSPI0_ECS0N", RZG2L_SINGLE_PIN_PACK(0x7, 7, (PIN_CFG_PUPD)) },
- { "XSPI0_IO0", RZG2L_SINGLE_PIN_PACK(0x8, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "XSPI0_IO1", RZG2L_SINGLE_PIN_PACK(0x8, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "XSPI0_IO2", RZG2L_SINGLE_PIN_PACK(0x8, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "XSPI0_IO3", RZG2L_SINGLE_PIN_PACK(0x8, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "XSPI0_IO4", RZG2L_SINGLE_PIN_PACK(0x8, 4, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "XSPI0_IO5", RZG2L_SINGLE_PIN_PACK(0x8, 5, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "XSPI0_IO6", RZG2L_SINGLE_PIN_PACK(0x8, 6, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "XSPI0_IO7", RZG2L_SINGLE_PIN_PACK(0x8, 7, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "SD0CLK", RZG2L_SINGLE_PIN_PACK(0x9, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
- { "SD0CMD", RZG2L_SINGLE_PIN_PACK(0x9, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_IEN | PIN_CFG_PUPD)) },
- { "SD0RSTN", RZG2L_SINGLE_PIN_PACK(0x9, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
- { "SD0DAT0", RZG2L_SINGLE_PIN_PACK(0xa, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_IEN | PIN_CFG_PUPD)) },
- { "SD0DAT1", RZG2L_SINGLE_PIN_PACK(0xa, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_IEN | PIN_CFG_PUPD)) },
- { "SD0DAT2", RZG2L_SINGLE_PIN_PACK(0xa, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_IEN | PIN_CFG_PUPD)) },
- { "SD0DAT3", RZG2L_SINGLE_PIN_PACK(0xa, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_IEN | PIN_CFG_PUPD)) },
- { "SD0DAT4", RZG2L_SINGLE_PIN_PACK(0xa, 4, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_IEN | PIN_CFG_PUPD)) },
- { "SD0DAT5", RZG2L_SINGLE_PIN_PACK(0xa, 5, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_IEN | PIN_CFG_PUPD)) },
- { "SD0DAT6", RZG2L_SINGLE_PIN_PACK(0xa, 6, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_IEN | PIN_CFG_PUPD)) },
- { "SD0DAT7", RZG2L_SINGLE_PIN_PACK(0xa, 7, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_IEN | PIN_CFG_PUPD)) },
- { "SD1CLK", RZG2L_SINGLE_PIN_PACK(0xb, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
- { "SD1CMD", RZG2L_SINGLE_PIN_PACK(0xb, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_IEN | PIN_CFG_PUPD)) },
- { "SD1DAT0", RZG2L_SINGLE_PIN_PACK(0xc, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_IEN | PIN_CFG_PUPD)) },
- { "SD1DAT1", RZG2L_SINGLE_PIN_PACK(0xc, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_IEN | PIN_CFG_PUPD)) },
- { "SD1DAT2", RZG2L_SINGLE_PIN_PACK(0xc, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_IEN | PIN_CFG_PUPD)) },
- { "SD1DAT3", RZG2L_SINGLE_PIN_PACK(0xc, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_IEN | PIN_CFG_PUPD)) },
- { "PCIE0_RSTOUTB", RZG2L_SINGLE_PIN_PACK(0xe, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
- { "PCIE1_RSTOUTB", RZG2L_SINGLE_PIN_PACK(0xe, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
- { "ET0_MDIO", RZG2L_SINGLE_PIN_PACK(0xf, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_IEN | PIN_CFG_PUPD)) },
- { "ET0_MDC", RZG2L_SINGLE_PIN_PACK(0xf, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "ET0_RXCTL_RXDV", RZG2L_SINGLE_PIN_PACK(0x10, 0, (PIN_CFG_PUPD)) },
- { "ET0_TXCTL_TXEN", RZG2L_SINGLE_PIN_PACK(0x10, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "ET0_TXER", RZG2L_SINGLE_PIN_PACK(0x10, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "ET0_RXER", RZG2L_SINGLE_PIN_PACK(0x10, 3, (PIN_CFG_PUPD)) },
- { "ET0_RXC_RXCLK", RZG2L_SINGLE_PIN_PACK(0x10, 4, (PIN_CFG_PUPD)) },
- { "ET0_TXC_TXCLK", RZG2L_SINGLE_PIN_PACK(0x10, 5, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD | PIN_CFG_OEN)) },
- { "ET0_CRS", RZG2L_SINGLE_PIN_PACK(0x10, 6, (PIN_CFG_PUPD)) },
- { "ET0_COL", RZG2L_SINGLE_PIN_PACK(0x10, 7, (PIN_CFG_PUPD)) },
- { "ET0_TXD0", RZG2L_SINGLE_PIN_PACK(0x11, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "ET0_TXD1", RZG2L_SINGLE_PIN_PACK(0x11, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "ET0_TXD2", RZG2L_SINGLE_PIN_PACK(0x11, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "ET0_TXD3", RZG2L_SINGLE_PIN_PACK(0x11, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "ET0_RXD0", RZG2L_SINGLE_PIN_PACK(0x11, 4, (PIN_CFG_PUPD)) },
- { "ET0_RXD1", RZG2L_SINGLE_PIN_PACK(0x11, 5, (PIN_CFG_PUPD)) },
- { "ET0_RXD2", RZG2L_SINGLE_PIN_PACK(0x11, 6, (PIN_CFG_PUPD)) },
- { "ET0_RXD3", RZG2L_SINGLE_PIN_PACK(0x11, 7, (PIN_CFG_PUPD)) },
- { "ET1_MDIO", RZG2L_SINGLE_PIN_PACK(0x12, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_IEN | PIN_CFG_PUPD)) },
- { "ET1_MDC", RZG2L_SINGLE_PIN_PACK(0x12, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "ET1_RXCTL_RXDV", RZG2L_SINGLE_PIN_PACK(0x13, 0, (PIN_CFG_PUPD)) },
- { "ET1_TXCTL_TXEN", RZG2L_SINGLE_PIN_PACK(0x13, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+static const struct {
+ struct rzg2l_dedicated_configs common[77];
+ struct rzg2l_dedicated_configs pcie1[1];
+} rzv2h_dedicated_pins = {
+ .common = {
+ { "NMI", RZG2L_SINGLE_PIN_PACK(0x1, 0, PIN_CFG_NF) },
+ { "TMS_SWDIO", RZG2L_SINGLE_PIN_PACK(0x3, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN)) },
+ { "TDO", RZG2L_SINGLE_PIN_PACK(0x3, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
+ { "WDTUDFCA", RZG2L_SINGLE_PIN_PACK(0x5, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD | PIN_CFG_NOD)) },
+ { "WDTUDFCM", RZG2L_SINGLE_PIN_PACK(0x5, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD | PIN_CFG_NOD)) },
+ { "SCIF_RXD", RZG2L_SINGLE_PIN_PACK(0x6, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "SCIF_TXD", RZG2L_SINGLE_PIN_PACK(0x6, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "XSPI0_CKP", RZG2L_SINGLE_PIN_PACK(0x7, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD | PIN_CFG_OEN)) },
+ { "XSPI0_CKN", RZG2L_SINGLE_PIN_PACK(0x7, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD | PIN_CFG_OEN)) },
+ { "XSPI0_CS0N", RZG2L_SINGLE_PIN_PACK(0x7, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD | PIN_CFG_OEN)) },
+ { "XSPI0_DS", RZG2L_SINGLE_PIN_PACK(0x7, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "XSPI0_RESET0N", RZG2L_SINGLE_PIN_PACK(0x7, 4, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD | PIN_CFG_OEN)) },
+ { "XSPI0_RSTO0N", RZG2L_SINGLE_PIN_PACK(0x7, 5, (PIN_CFG_PUPD)) },
+ { "XSPI0_INT0N", RZG2L_SINGLE_PIN_PACK(0x7, 6, (PIN_CFG_PUPD)) },
+ { "XSPI0_ECS0N", RZG2L_SINGLE_PIN_PACK(0x7, 7, (PIN_CFG_PUPD)) },
+ { "XSPI0_IO0", RZG2L_SINGLE_PIN_PACK(0x8, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "XSPI0_IO1", RZG2L_SINGLE_PIN_PACK(0x8, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "XSPI0_IO2", RZG2L_SINGLE_PIN_PACK(0x8, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "XSPI0_IO3", RZG2L_SINGLE_PIN_PACK(0x8, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "XSPI0_IO4", RZG2L_SINGLE_PIN_PACK(0x8, 4, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "XSPI0_IO5", RZG2L_SINGLE_PIN_PACK(0x8, 5, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "XSPI0_IO6", RZG2L_SINGLE_PIN_PACK(0x8, 6, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "XSPI0_IO7", RZG2L_SINGLE_PIN_PACK(0x8, 7, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "SD0CLK", RZG2L_SINGLE_PIN_PACK(0x9, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
+ { "SD0CMD", RZG2L_SINGLE_PIN_PACK(0x9, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0RSTN", RZG2L_SINGLE_PIN_PACK(0x9, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
+ { "SD0DAT0", RZG2L_SINGLE_PIN_PACK(0xa, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT1", RZG2L_SINGLE_PIN_PACK(0xa, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT2", RZG2L_SINGLE_PIN_PACK(0xa, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT3", RZG2L_SINGLE_PIN_PACK(0xa, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT4", RZG2L_SINGLE_PIN_PACK(0xa, 4, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT5", RZG2L_SINGLE_PIN_PACK(0xa, 5, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT6", RZG2L_SINGLE_PIN_PACK(0xa, 6, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT7", RZG2L_SINGLE_PIN_PACK(0xa, 7, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD1CLK", RZG2L_SINGLE_PIN_PACK(0xb, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
+ { "SD1CMD", RZG2L_SINGLE_PIN_PACK(0xb, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD1DAT0", RZG2L_SINGLE_PIN_PACK(0xc, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD1DAT1", RZG2L_SINGLE_PIN_PACK(0xc, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD1DAT2", RZG2L_SINGLE_PIN_PACK(0xc, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD1DAT3", RZG2L_SINGLE_PIN_PACK(0xc, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "PCIE0_RSTOUTB", RZG2L_SINGLE_PIN_PACK(0xe, 0, (PIN_CFG_IOLH_RZV2H |
+ PIN_CFG_SR)) },
+ { "ET0_MDIO", RZG2L_SINGLE_PIN_PACK(0xf, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "ET0_MDC", RZG2L_SINGLE_PIN_PACK(0xf, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
PIN_CFG_PUPD)) },
- { "ET1_TXER", RZG2L_SINGLE_PIN_PACK(0x13, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "ET1_RXER", RZG2L_SINGLE_PIN_PACK(0x13, 3, (PIN_CFG_PUPD)) },
- { "ET1_RXC_RXCLK", RZG2L_SINGLE_PIN_PACK(0x13, 4, (PIN_CFG_PUPD)) },
- { "ET1_TXC_TXCLK", RZG2L_SINGLE_PIN_PACK(0x13, 5, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD | PIN_CFG_OEN)) },
- { "ET1_CRS", RZG2L_SINGLE_PIN_PACK(0x13, 6, (PIN_CFG_PUPD)) },
- { "ET1_COL", RZG2L_SINGLE_PIN_PACK(0x13, 7, (PIN_CFG_PUPD)) },
- { "ET1_TXD0", RZG2L_SINGLE_PIN_PACK(0x14, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "ET1_TXD1", RZG2L_SINGLE_PIN_PACK(0x14, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "ET1_TXD2", RZG2L_SINGLE_PIN_PACK(0x14, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "ET1_TXD3", RZG2L_SINGLE_PIN_PACK(0x14, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "ET1_RXD0", RZG2L_SINGLE_PIN_PACK(0x14, 4, (PIN_CFG_PUPD)) },
- { "ET1_RXD1", RZG2L_SINGLE_PIN_PACK(0x14, 5, (PIN_CFG_PUPD)) },
- { "ET1_RXD2", RZG2L_SINGLE_PIN_PACK(0x14, 6, (PIN_CFG_PUPD)) },
- { "ET1_RXD3", RZG2L_SINGLE_PIN_PACK(0x14, 7, (PIN_CFG_PUPD)) },
+ { "ET0_RXCTL_RXDV", RZG2L_SINGLE_PIN_PACK(0x10, 0, (PIN_CFG_PUPD)) },
+ { "ET0_TXCTL_TXEN", RZG2L_SINGLE_PIN_PACK(0x10, 1, (PIN_CFG_IOLH_RZV2H |
+ PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET0_TXER", RZG2L_SINGLE_PIN_PACK(0x10, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET0_RXER", RZG2L_SINGLE_PIN_PACK(0x10, 3, (PIN_CFG_PUPD)) },
+ { "ET0_RXC_RXCLK", RZG2L_SINGLE_PIN_PACK(0x10, 4, (PIN_CFG_PUPD)) },
+ { "ET0_TXC_TXCLK", RZG2L_SINGLE_PIN_PACK(0x10, 5, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD | PIN_CFG_OEN)) },
+ { "ET0_CRS", RZG2L_SINGLE_PIN_PACK(0x10, 6, (PIN_CFG_PUPD)) },
+ { "ET0_COL", RZG2L_SINGLE_PIN_PACK(0x10, 7, (PIN_CFG_PUPD)) },
+ { "ET0_TXD0", RZG2L_SINGLE_PIN_PACK(0x11, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET0_TXD1", RZG2L_SINGLE_PIN_PACK(0x11, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET0_TXD2", RZG2L_SINGLE_PIN_PACK(0x11, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET0_TXD3", RZG2L_SINGLE_PIN_PACK(0x11, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET0_RXD0", RZG2L_SINGLE_PIN_PACK(0x11, 4, (PIN_CFG_PUPD)) },
+ { "ET0_RXD1", RZG2L_SINGLE_PIN_PACK(0x11, 5, (PIN_CFG_PUPD)) },
+ { "ET0_RXD2", RZG2L_SINGLE_PIN_PACK(0x11, 6, (PIN_CFG_PUPD)) },
+ { "ET0_RXD3", RZG2L_SINGLE_PIN_PACK(0x11, 7, (PIN_CFG_PUPD)) },
+ { "ET1_MDIO", RZG2L_SINGLE_PIN_PACK(0x12, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "ET1_MDC", RZG2L_SINGLE_PIN_PACK(0x12, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET1_RXCTL_RXDV", RZG2L_SINGLE_PIN_PACK(0x13, 0, (PIN_CFG_PUPD)) },
+ { "ET1_TXCTL_TXEN", RZG2L_SINGLE_PIN_PACK(0x13, 1, (PIN_CFG_IOLH_RZV2H |
+ PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET1_TXER", RZG2L_SINGLE_PIN_PACK(0x13, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET1_RXER", RZG2L_SINGLE_PIN_PACK(0x13, 3, (PIN_CFG_PUPD)) },
+ { "ET1_RXC_RXCLK", RZG2L_SINGLE_PIN_PACK(0x13, 4, (PIN_CFG_PUPD)) },
+ { "ET1_TXC_TXCLK", RZG2L_SINGLE_PIN_PACK(0x13, 5, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD | PIN_CFG_OEN)) },
+ { "ET1_CRS", RZG2L_SINGLE_PIN_PACK(0x13, 6, (PIN_CFG_PUPD)) },
+ { "ET1_COL", RZG2L_SINGLE_PIN_PACK(0x13, 7, (PIN_CFG_PUPD)) },
+ { "ET1_TXD0", RZG2L_SINGLE_PIN_PACK(0x14, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET1_TXD1", RZG2L_SINGLE_PIN_PACK(0x14, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET1_TXD2", RZG2L_SINGLE_PIN_PACK(0x14, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET1_TXD3", RZG2L_SINGLE_PIN_PACK(0x14, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET1_RXD0", RZG2L_SINGLE_PIN_PACK(0x14, 4, (PIN_CFG_PUPD)) },
+ { "ET1_RXD1", RZG2L_SINGLE_PIN_PACK(0x14, 5, (PIN_CFG_PUPD)) },
+ { "ET1_RXD2", RZG2L_SINGLE_PIN_PACK(0x14, 6, (PIN_CFG_PUPD)) },
+ { "ET1_RXD3", RZG2L_SINGLE_PIN_PACK(0x14, 7, (PIN_CFG_PUPD)) },
+ },
+ .pcie1 = {
+ { "PCIE1_RSTOUTB", RZG2L_SINGLE_PIN_PACK(0xe, 1, (PIN_CFG_IOLH_RZV2H |
+ PIN_CFG_SR)) },
+ },
};
static struct rzg2l_dedicated_configs rzg3e_dedicated_pins[] = {
@@ -3349,13 +3360,37 @@ static struct rzg2l_pinctrl_data r9a09g047_data = {
.bias_param_to_hw = &rzv2h_bias_param_to_hw,
};
+static struct rzg2l_pinctrl_data r9a09g056_data = {
+ .port_pins = rzv2h_gpio_names,
+ .port_pin_configs = r9a09g057_gpio_configs,
+ .n_ports = ARRAY_SIZE(r9a09g057_gpio_configs),
+ .dedicated_pins = rzv2h_dedicated_pins.common,
+ .n_port_pins = ARRAY_SIZE(r9a09g057_gpio_configs) * RZG2L_PINS_PER_PORT,
+ .n_dedicated_pins = ARRAY_SIZE(rzv2h_dedicated_pins.common),
+ .hwcfg = &rzv2h_hwcfg,
+ .variable_pin_cfg = r9a09g057_variable_pin_cfg,
+ .n_variable_pin_cfg = ARRAY_SIZE(r9a09g057_variable_pin_cfg),
+ .num_custom_params = ARRAY_SIZE(renesas_rzv2h_custom_bindings),
+ .custom_params = renesas_rzv2h_custom_bindings,
+#ifdef CONFIG_DEBUG_FS
+ .custom_conf_items = renesas_rzv2h_conf_items,
+#endif
+ .pwpr_pfc_lock_unlock = &rzv2h_pwpr_pfc_lock_unlock,
+ .pmc_writeb = &rzv2h_pmc_writeb,
+ .oen_read = &rzv2h_oen_read,
+ .oen_write = &rzv2h_oen_write,
+ .hw_to_bias_param = &rzv2h_hw_to_bias_param,
+ .bias_param_to_hw = &rzv2h_bias_param_to_hw,
+};
+
static struct rzg2l_pinctrl_data r9a09g057_data = {
.port_pins = rzv2h_gpio_names,
.port_pin_configs = r9a09g057_gpio_configs,
.n_ports = ARRAY_SIZE(r9a09g057_gpio_configs),
- .dedicated_pins = rzv2h_dedicated_pins,
+ .dedicated_pins = rzv2h_dedicated_pins.common,
.n_port_pins = ARRAY_SIZE(r9a09g057_gpio_configs) * RZG2L_PINS_PER_PORT,
- .n_dedicated_pins = ARRAY_SIZE(rzv2h_dedicated_pins),
+ .n_dedicated_pins = ARRAY_SIZE(rzv2h_dedicated_pins.common) +
+ ARRAY_SIZE(rzv2h_dedicated_pins.pcie1),
.hwcfg = &rzv2h_hwcfg,
.variable_pin_cfg = r9a09g057_variable_pin_cfg,
.n_variable_pin_cfg = ARRAY_SIZE(r9a09g057_variable_pin_cfg),
@@ -3390,6 +3425,10 @@ static const struct of_device_id rzg2l_pinctrl_of_table[] = {
.data = &r9a09g047_data,
},
{
+ .compatible = "renesas,r9a09g056-pinctrl",
+ .data = &r9a09g056_data,
+ },
+ {
.compatible = "renesas,r9a09g057-pinctrl",
.data = &r9a09g057_data,
},
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
index dd07720e32cc..9fd894729a7b 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
@@ -1419,8 +1419,8 @@ static const struct samsung_pin_ctrl exynosautov920_pin_ctrl[] = {
.pin_banks = exynosautov920_pin_banks0,
.nr_banks = ARRAY_SIZE(exynosautov920_pin_banks0),
.eint_wkup_init = exynos_eint_wkup_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
+ .suspend = exynosautov920_pinctrl_suspend,
+ .resume = exynosautov920_pinctrl_resume,
.retention_data = &exynosautov920_retention_data,
}, {
/* pin-controller instance 1 AUD data */
@@ -1431,43 +1431,43 @@ static const struct samsung_pin_ctrl exynosautov920_pin_ctrl[] = {
.pin_banks = exynosautov920_pin_banks2,
.nr_banks = ARRAY_SIZE(exynosautov920_pin_banks2),
.eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
+ .suspend = exynosautov920_pinctrl_suspend,
+ .resume = exynosautov920_pinctrl_resume,
}, {
/* pin-controller instance 3 HSI1 data */
.pin_banks = exynosautov920_pin_banks3,
.nr_banks = ARRAY_SIZE(exynosautov920_pin_banks3),
.eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
+ .suspend = exynosautov920_pinctrl_suspend,
+ .resume = exynosautov920_pinctrl_resume,
}, {
/* pin-controller instance 4 HSI2 data */
.pin_banks = exynosautov920_pin_banks4,
.nr_banks = ARRAY_SIZE(exynosautov920_pin_banks4),
.eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
+ .suspend = exynosautov920_pinctrl_suspend,
+ .resume = exynosautov920_pinctrl_resume,
}, {
/* pin-controller instance 5 HSI2UFS data */
.pin_banks = exynosautov920_pin_banks5,
.nr_banks = ARRAY_SIZE(exynosautov920_pin_banks5),
.eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
+ .suspend = exynosautov920_pinctrl_suspend,
+ .resume = exynosautov920_pinctrl_resume,
}, {
/* pin-controller instance 6 PERIC0 data */
.pin_banks = exynosautov920_pin_banks6,
.nr_banks = ARRAY_SIZE(exynosautov920_pin_banks6),
.eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
+ .suspend = exynosautov920_pinctrl_suspend,
+ .resume = exynosautov920_pinctrl_resume,
}, {
/* pin-controller instance 7 PERIC1 data */
.pin_banks = exynosautov920_pin_banks7,
.nr_banks = ARRAY_SIZE(exynosautov920_pin_banks7),
.eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
+ .suspend = exynosautov920_pinctrl_suspend,
+ .resume = exynosautov920_pinctrl_resume,
},
};
@@ -1762,15 +1762,15 @@ static const struct samsung_pin_ctrl gs101_pin_ctrl[] __initconst = {
.pin_banks = gs101_pin_alive,
.nr_banks = ARRAY_SIZE(gs101_pin_alive),
.eint_wkup_init = exynos_eint_wkup_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
+ .suspend = gs101_pinctrl_suspend,
+ .resume = gs101_pinctrl_resume,
}, {
/* pin banks of gs101 pin-controller (FAR_ALIVE) */
.pin_banks = gs101_pin_far_alive,
.nr_banks = ARRAY_SIZE(gs101_pin_far_alive),
.eint_wkup_init = exynos_eint_wkup_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
+ .suspend = gs101_pinctrl_suspend,
+ .resume = gs101_pinctrl_resume,
}, {
/* pin banks of gs101 pin-controller (GSACORE) */
.pin_banks = gs101_pin_gsacore,
@@ -1784,29 +1784,29 @@ static const struct samsung_pin_ctrl gs101_pin_ctrl[] __initconst = {
.pin_banks = gs101_pin_peric0,
.nr_banks = ARRAY_SIZE(gs101_pin_peric0),
.eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
+ .suspend = gs101_pinctrl_suspend,
+ .resume = gs101_pinctrl_resume,
}, {
/* pin banks of gs101 pin-controller (PERIC1) */
.pin_banks = gs101_pin_peric1,
.nr_banks = ARRAY_SIZE(gs101_pin_peric1),
.eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
+ .suspend = gs101_pinctrl_suspend,
+ .resume = gs101_pinctrl_resume,
}, {
/* pin banks of gs101 pin-controller (HSI1) */
.pin_banks = gs101_pin_hsi1,
.nr_banks = ARRAY_SIZE(gs101_pin_hsi1),
.eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
+ .suspend = gs101_pinctrl_suspend,
+ .resume = gs101_pinctrl_resume,
}, {
/* pin banks of gs101 pin-controller (HSI2) */
.pin_banks = gs101_pin_hsi2,
.nr_banks = ARRAY_SIZE(gs101_pin_hsi2),
.eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
+ .suspend = gs101_pinctrl_suspend,
+ .resume = gs101_pinctrl_resume,
},
};
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index 42093bae8bb7..f3e1c11abe55 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -370,6 +370,37 @@ struct exynos_eint_gpio_save {
u32 eint_mask;
};
+static void exynos_eint_update_flt_reg(void __iomem *reg, int cnt, int con)
+{
+ unsigned int val, shift;
+ int i;
+
+ val = readl(reg);
+ for (i = 0; i < cnt; i++) {
+ shift = i * EXYNOS_FLTCON_LEN;
+ val &= ~(EXYNOS_FLTCON_DIGITAL << shift);
+ val |= con << shift;
+ }
+ writel(val, reg);
+}
+
+/*
+ * Set the desired filter (digital or analog delay) and enable it for
+ * every pin in the bank. Note the filter selection bitfield is only
+ * found on alive banks. The filter determines to what extent signal
+ * fluctuations received through the pad are considered glitches.
+ */
+static void exynos_eint_set_filter(struct samsung_pin_bank *bank, int filter)
+{
+ unsigned int off = EXYNOS_GPIO_EFLTCON_OFFSET + bank->eint_fltcon_offset;
+ void __iomem *reg = bank->drvdata->virt_base + off;
+ unsigned int con = EXYNOS_FLTCON_EN | filter;
+
+ for (int n = 0; n < bank->nr_pins; n += 4)
+ exynos_eint_update_flt_reg(reg + n,
+ min(bank->nr_pins - n, 4), con);
+}
+
/*
* exynos_eint_gpio_init() - setup handling of external gpio interrupts.
* @d: driver data of samsung pinctrl driver.
@@ -762,153 +793,190 @@ __init int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
return 0;
}
-static void exynos_pinctrl_suspend_bank(
- struct samsung_pinctrl_drv_data *drvdata,
- struct samsung_pin_bank *bank)
+static void exynos_set_wakeup(struct samsung_pin_bank *bank)
{
- struct exynos_eint_gpio_save *save = bank->soc_priv;
- const void __iomem *regs = bank->eint_base;
+ struct exynos_irq_chip *irq_chip;
- if (clk_enable(bank->drvdata->pclk)) {
- dev_err(bank->gpio_chip.parent,
- "unable to enable clock for saving state\n");
- return;
+ if (bank->irq_chip) {
+ irq_chip = bank->irq_chip;
+ irq_chip->set_eint_wakeup_mask(bank->drvdata, irq_chip);
}
-
- save->eint_con = readl(regs + EXYNOS_GPIO_ECON_OFFSET
- + bank->eint_offset);
- save->eint_fltcon0 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
- + 2 * bank->eint_offset);
- save->eint_fltcon1 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
- + 2 * bank->eint_offset + 4);
- save->eint_mask = readl(regs + bank->irq_chip->eint_mask
- + bank->eint_offset);
-
- clk_disable(bank->drvdata->pclk);
-
- pr_debug("%s: save con %#010x\n", bank->name, save->eint_con);
- pr_debug("%s: save fltcon0 %#010x\n", bank->name, save->eint_fltcon0);
- pr_debug("%s: save fltcon1 %#010x\n", bank->name, save->eint_fltcon1);
- pr_debug("%s: save mask %#010x\n", bank->name, save->eint_mask);
}
-static void exynosauto_pinctrl_suspend_bank(struct samsung_pinctrl_drv_data *drvdata,
- struct samsung_pin_bank *bank)
+void exynos_pinctrl_suspend(struct samsung_pin_bank *bank)
{
struct exynos_eint_gpio_save *save = bank->soc_priv;
const void __iomem *regs = bank->eint_base;
- if (clk_enable(bank->drvdata->pclk)) {
- dev_err(bank->gpio_chip.parent,
- "unable to enable clock for saving state\n");
- return;
+ if (bank->eint_type == EINT_TYPE_GPIO) {
+ save->eint_con = readl(regs + EXYNOS_GPIO_ECON_OFFSET
+ + bank->eint_offset);
+ save->eint_fltcon0 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
+ + 2 * bank->eint_offset);
+ save->eint_fltcon1 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
+ + 2 * bank->eint_offset + 4);
+ save->eint_mask = readl(regs + bank->irq_chip->eint_mask
+ + bank->eint_offset);
+
+ pr_debug("%s: save con %#010x\n",
+ bank->name, save->eint_con);
+ pr_debug("%s: save fltcon0 %#010x\n",
+ bank->name, save->eint_fltcon0);
+ pr_debug("%s: save fltcon1 %#010x\n",
+ bank->name, save->eint_fltcon1);
+ pr_debug("%s: save mask %#010x\n",
+ bank->name, save->eint_mask);
+ } else if (bank->eint_type == EINT_TYPE_WKUP) {
+ exynos_set_wakeup(bank);
}
-
- save->eint_con = readl(regs + bank->pctl_offset + bank->eint_con_offset);
- save->eint_mask = readl(regs + bank->pctl_offset + bank->eint_mask_offset);
-
- clk_disable(bank->drvdata->pclk);
-
- pr_debug("%s: save con %#010x\n", bank->name, save->eint_con);
- pr_debug("%s: save mask %#010x\n", bank->name, save->eint_mask);
}
-void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata)
+void gs101_pinctrl_suspend(struct samsung_pin_bank *bank)
{
- struct samsung_pin_bank *bank = drvdata->pin_banks;
- struct exynos_irq_chip *irq_chip = NULL;
- int i;
+ struct exynos_eint_gpio_save *save = bank->soc_priv;
+ const void __iomem *regs = bank->eint_base;
- for (i = 0; i < drvdata->nr_banks; ++i, ++bank) {
- if (bank->eint_type == EINT_TYPE_GPIO) {
- if (bank->eint_con_offset)
- exynosauto_pinctrl_suspend_bank(drvdata, bank);
- else
- exynos_pinctrl_suspend_bank(drvdata, bank);
- }
- else if (bank->eint_type == EINT_TYPE_WKUP) {
- if (!irq_chip) {
- irq_chip = bank->irq_chip;
- irq_chip->set_eint_wakeup_mask(drvdata,
- irq_chip);
- }
- }
+ if (bank->eint_type == EINT_TYPE_GPIO) {
+ save->eint_con = readl(regs + EXYNOS_GPIO_ECON_OFFSET
+ + bank->eint_offset);
+
+ save->eint_fltcon0 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
+ + bank->eint_fltcon_offset);
+
+ /* fltcon1 register only exists for pins 4-7 */
+ if (bank->nr_pins > 4)
+ save->eint_fltcon1 = readl(regs +
+ EXYNOS_GPIO_EFLTCON_OFFSET
+ + bank->eint_fltcon_offset + 4);
+
+ save->eint_mask = readl(regs + bank->irq_chip->eint_mask
+ + bank->eint_offset);
+
+ pr_debug("%s: save con %#010x\n",
+ bank->name, save->eint_con);
+ pr_debug("%s: save fltcon0 %#010x\n",
+ bank->name, save->eint_fltcon0);
+ if (bank->nr_pins > 4)
+ pr_debug("%s: save fltcon1 %#010x\n",
+ bank->name, save->eint_fltcon1);
+ pr_debug("%s: save mask %#010x\n",
+ bank->name, save->eint_mask);
+ } else if (bank->eint_type == EINT_TYPE_WKUP) {
+ exynos_set_wakeup(bank);
+ exynos_eint_set_filter(bank, EXYNOS_FLTCON_ANALOG);
}
}
-static void exynos_pinctrl_resume_bank(
- struct samsung_pinctrl_drv_data *drvdata,
- struct samsung_pin_bank *bank)
+void exynosautov920_pinctrl_suspend(struct samsung_pin_bank *bank)
{
struct exynos_eint_gpio_save *save = bank->soc_priv;
- void __iomem *regs = bank->eint_base;
+ const void __iomem *regs = bank->eint_base;
- if (clk_enable(bank->drvdata->pclk)) {
- dev_err(bank->gpio_chip.parent,
- "unable to enable clock for restoring state\n");
- return;
+ if (bank->eint_type == EINT_TYPE_GPIO) {
+ save->eint_con = readl(regs + bank->pctl_offset +
+ bank->eint_con_offset);
+ save->eint_mask = readl(regs + bank->pctl_offset +
+ bank->eint_mask_offset);
+ pr_debug("%s: save con %#010x\n",
+ bank->name, save->eint_con);
+ pr_debug("%s: save mask %#010x\n",
+ bank->name, save->eint_mask);
+ } else if (bank->eint_type == EINT_TYPE_WKUP) {
+ exynos_set_wakeup(bank);
}
+}
- pr_debug("%s: con %#010x => %#010x\n", bank->name,
- readl(regs + EXYNOS_GPIO_ECON_OFFSET
- + bank->eint_offset), save->eint_con);
- pr_debug("%s: fltcon0 %#010x => %#010x\n", bank->name,
- readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
- + 2 * bank->eint_offset), save->eint_fltcon0);
- pr_debug("%s: fltcon1 %#010x => %#010x\n", bank->name,
- readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
- + 2 * bank->eint_offset + 4), save->eint_fltcon1);
- pr_debug("%s: mask %#010x => %#010x\n", bank->name,
- readl(regs + bank->irq_chip->eint_mask
- + bank->eint_offset), save->eint_mask);
-
- writel(save->eint_con, regs + EXYNOS_GPIO_ECON_OFFSET
- + bank->eint_offset);
- writel(save->eint_fltcon0, regs + EXYNOS_GPIO_EFLTCON_OFFSET
- + 2 * bank->eint_offset);
- writel(save->eint_fltcon1, regs + EXYNOS_GPIO_EFLTCON_OFFSET
- + 2 * bank->eint_offset + 4);
- writel(save->eint_mask, regs + bank->irq_chip->eint_mask
- + bank->eint_offset);
+void gs101_pinctrl_resume(struct samsung_pin_bank *bank)
+{
+ struct exynos_eint_gpio_save *save = bank->soc_priv;
- clk_disable(bank->drvdata->pclk);
+ void __iomem *regs = bank->eint_base;
+ void __iomem *eint_fltcfg0 = regs + EXYNOS_GPIO_EFLTCON_OFFSET
+ + bank->eint_fltcon_offset;
+
+ if (bank->eint_type == EINT_TYPE_GPIO) {
+ pr_debug("%s: con %#010x => %#010x\n", bank->name,
+ readl(regs + EXYNOS_GPIO_ECON_OFFSET
+ + bank->eint_offset), save->eint_con);
+
+ pr_debug("%s: fltcon0 %#010x => %#010x\n", bank->name,
+ readl(eint_fltcfg0), save->eint_fltcon0);
+
+ /* fltcon1 register only exists for pins 4-7 */
+ if (bank->nr_pins > 4)
+ pr_debug("%s: fltcon1 %#010x => %#010x\n", bank->name,
+ readl(eint_fltcfg0 + 4), save->eint_fltcon1);
+
+ pr_debug("%s: mask %#010x => %#010x\n", bank->name,
+ readl(regs + bank->irq_chip->eint_mask
+ + bank->eint_offset), save->eint_mask);
+
+ writel(save->eint_con, regs + EXYNOS_GPIO_ECON_OFFSET
+ + bank->eint_offset);
+ writel(save->eint_fltcon0, eint_fltcfg0);
+
+ if (bank->nr_pins > 4)
+ writel(save->eint_fltcon1, eint_fltcfg0 + 4);
+ writel(save->eint_mask, regs + bank->irq_chip->eint_mask
+ + bank->eint_offset);
+ } else if (bank->eint_type == EINT_TYPE_WKUP) {
+ exynos_eint_set_filter(bank, EXYNOS_FLTCON_DIGITAL);
+ }
}
-static void exynosauto_pinctrl_resume_bank(struct samsung_pinctrl_drv_data *drvdata,
- struct samsung_pin_bank *bank)
+void exynos_pinctrl_resume(struct samsung_pin_bank *bank)
{
struct exynos_eint_gpio_save *save = bank->soc_priv;
void __iomem *regs = bank->eint_base;
- if (clk_enable(bank->drvdata->pclk)) {
- dev_err(bank->gpio_chip.parent,
- "unable to enable clock for restoring state\n");
- return;
+ if (bank->eint_type == EINT_TYPE_GPIO) {
+ pr_debug("%s: con %#010x => %#010x\n", bank->name,
+ readl(regs + EXYNOS_GPIO_ECON_OFFSET
+ + bank->eint_offset), save->eint_con);
+ pr_debug("%s: fltcon0 %#010x => %#010x\n", bank->name,
+ readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
+ + 2 * bank->eint_offset), save->eint_fltcon0);
+ pr_debug("%s: fltcon1 %#010x => %#010x\n", bank->name,
+ readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
+ + 2 * bank->eint_offset + 4),
+ save->eint_fltcon1);
+ pr_debug("%s: mask %#010x => %#010x\n", bank->name,
+ readl(regs + bank->irq_chip->eint_mask
+ + bank->eint_offset), save->eint_mask);
+
+ writel(save->eint_con, regs + EXYNOS_GPIO_ECON_OFFSET
+ + bank->eint_offset);
+ writel(save->eint_fltcon0, regs + EXYNOS_GPIO_EFLTCON_OFFSET
+ + 2 * bank->eint_offset);
+ writel(save->eint_fltcon1, regs + EXYNOS_GPIO_EFLTCON_OFFSET
+ + 2 * bank->eint_offset + 4);
+ writel(save->eint_mask, regs + bank->irq_chip->eint_mask
+ + bank->eint_offset);
}
-
- pr_debug("%s: con %#010x => %#010x\n", bank->name,
- readl(regs + bank->pctl_offset + bank->eint_con_offset), save->eint_con);
- pr_debug("%s: mask %#010x => %#010x\n", bank->name,
- readl(regs + bank->pctl_offset + bank->eint_mask_offset), save->eint_mask);
-
- writel(save->eint_con, regs + bank->pctl_offset + bank->eint_con_offset);
- writel(save->eint_mask, regs + bank->pctl_offset + bank->eint_mask_offset);
-
- clk_disable(bank->drvdata->pclk);
}
-void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata)
+void exynosautov920_pinctrl_resume(struct samsung_pin_bank *bank)
{
- struct samsung_pin_bank *bank = drvdata->pin_banks;
- int i;
+ struct exynos_eint_gpio_save *save = bank->soc_priv;
+ void __iomem *regs = bank->eint_base;
- for (i = 0; i < drvdata->nr_banks; ++i, ++bank)
- if (bank->eint_type == EINT_TYPE_GPIO) {
- if (bank->eint_con_offset)
- exynosauto_pinctrl_resume_bank(drvdata, bank);
- else
- exynos_pinctrl_resume_bank(drvdata, bank);
- }
+ if (bank->eint_type == EINT_TYPE_GPIO) {
+ /* exynosautov920 has eint_con_offset for all but one bank */
+ if (!bank->eint_con_offset)
+ exynos_pinctrl_resume(bank);
+
+ pr_debug("%s: con %#010x => %#010x\n", bank->name,
+ readl(regs + bank->pctl_offset + bank->eint_con_offset),
+ save->eint_con);
+ pr_debug("%s: mask %#010x => %#010x\n", bank->name,
+ readl(regs + bank->pctl_offset +
+ bank->eint_mask_offset), save->eint_mask);
+
+ writel(save->eint_con,
+ regs + bank->pctl_offset + bank->eint_con_offset);
+ writel(save->eint_mask,
+ regs + bank->pctl_offset + bank->eint_mask_offset);
+ }
}
static void exynos_retention_enable(struct samsung_pinctrl_drv_data *drvdata)
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h
index b483270ddc53..362dc533186f 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.h
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.h
@@ -52,6 +52,26 @@
#define EXYNOS_EINT_MAX_PER_BANK 8
#define EXYNOS_EINT_NR_WKUP_EINT
+/*
+ * EINT filter configuration register (on alive banks) has
+ * the following layout.
+ *
+ * BitfieldName[PinNum][Bit:Bit]
+ * FLT_EN[3][31] FLT_SEL[3][30] FLT_WIDTH[3][29:24]
+ * FLT_EN[2][23] FLT_SEL[2][22] FLT_WIDTH[2][21:16]
+ * FLT_EN[1][15] FLT_SEL[1][14] FLT_WIDTH[1][13:8]
+ * FLT_EN[0][7] FLT_SEL[0][6] FLT_WIDTH[0][5:0]
+ *
+ * FLT_EN 0x0 = Disable, 0x1 = Enable
+ * FLT_SEL 0x0 = Analog delay filter, 0x1 = Digital filter (clock count)
+ * FLT_WIDTH Filtering width. Valid when FLT_SEL is 0x1
+ */
+
+#define EXYNOS_FLTCON_EN BIT(7)
+#define EXYNOS_FLTCON_DIGITAL BIT(6)
+#define EXYNOS_FLTCON_ANALOG (0 << 6)
+#define EXYNOS_FLTCON_LEN 8
+
#define EXYNOS_PIN_BANK_EINTN(pins, reg, id) \
{ \
.type = &bank_type_off, \
@@ -240,8 +260,12 @@ struct exynos_muxed_weint_data {
int exynos_eint_gpio_init(struct samsung_pinctrl_drv_data *d);
int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d);
-void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata);
-void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata);
+void exynosautov920_pinctrl_resume(struct samsung_pin_bank *bank);
+void exynosautov920_pinctrl_suspend(struct samsung_pin_bank *bank);
+void exynos_pinctrl_suspend(struct samsung_pin_bank *bank);
+void exynos_pinctrl_resume(struct samsung_pin_bank *bank);
+void gs101_pinctrl_suspend(struct samsung_pin_bank *bank);
+void gs101_pinctrl_resume(struct samsung_pin_bank *bank);
struct samsung_retention_ctrl *
exynos_retention_init(struct samsung_pinctrl_drv_data *drvdata,
const struct samsung_retention_data *data);
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 2896eb2de2c0..fe1ac82b9d79 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -570,15 +570,18 @@ static void samsung_gpio_set_value(struct gpio_chip *gc,
}
/* gpiolib gpio_set callback function */
-static void samsung_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
+static int samsung_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
struct samsung_pin_bank *bank = gpiochip_get_data(gc);
struct samsung_pinctrl_drv_data *drvdata = bank->drvdata;
unsigned long flags;
+ int ret;
- if (clk_enable(drvdata->pclk)) {
+ ret = clk_enable(drvdata->pclk);
+ if (ret) {
dev_err(drvdata->dev, "failed to enable clock\n");
- return;
+ return ret;
}
raw_spin_lock_irqsave(&bank->slock, flags);
@@ -586,6 +589,8 @@ static void samsung_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
raw_spin_unlock_irqrestore(&bank->slock, flags);
clk_disable(drvdata->pclk);
+
+ return 0;
}
/* gpiolib gpio_get callback function */
@@ -1062,7 +1067,7 @@ static int samsung_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
static const struct gpio_chip samsung_gpiolib_chip = {
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
- .set = samsung_gpio_set,
+ .set_rv = samsung_gpio_set,
.get = samsung_gpio_get,
.direction_input = samsung_gpio_direction_input,
.direction_output = samsung_gpio_direction_output,
@@ -1333,6 +1338,7 @@ err_put_banks:
static int __maybe_unused samsung_pinctrl_suspend(struct device *dev)
{
struct samsung_pinctrl_drv_data *drvdata = dev_get_drvdata(dev);
+ struct samsung_pin_bank *bank;
int i;
i = clk_enable(drvdata->pclk);
@@ -1343,7 +1349,7 @@ static int __maybe_unused samsung_pinctrl_suspend(struct device *dev)
}
for (i = 0; i < drvdata->nr_banks; i++) {
- struct samsung_pin_bank *bank = &drvdata->pin_banks[i];
+ bank = &drvdata->pin_banks[i];
const void __iomem *reg = bank->pctl_base + bank->pctl_offset;
const u8 *offs = bank->type->reg_offset;
const u8 *widths = bank->type->fld_width;
@@ -1371,10 +1377,14 @@ static int __maybe_unused samsung_pinctrl_suspend(struct device *dev)
}
}
+ for (i = 0; i < drvdata->nr_banks; i++) {
+ bank = &drvdata->pin_banks[i];
+ if (drvdata->suspend)
+ drvdata->suspend(bank);
+ }
+
clk_disable(drvdata->pclk);
- if (drvdata->suspend)
- drvdata->suspend(drvdata);
if (drvdata->retention_ctrl && drvdata->retention_ctrl->enable)
drvdata->retention_ctrl->enable(drvdata);
@@ -1392,6 +1402,7 @@ static int __maybe_unused samsung_pinctrl_suspend(struct device *dev)
static int __maybe_unused samsung_pinctrl_resume(struct device *dev)
{
struct samsung_pinctrl_drv_data *drvdata = dev_get_drvdata(dev);
+ struct samsung_pin_bank *bank;
int ret;
int i;
@@ -1406,11 +1417,14 @@ static int __maybe_unused samsung_pinctrl_resume(struct device *dev)
return ret;
}
- if (drvdata->resume)
- drvdata->resume(drvdata);
+ for (i = 0; i < drvdata->nr_banks; i++) {
+ bank = &drvdata->pin_banks[i];
+ if (drvdata->resume)
+ drvdata->resume(bank);
+ }
for (i = 0; i < drvdata->nr_banks; i++) {
- struct samsung_pin_bank *bank = &drvdata->pin_banks[i];
+ bank = &drvdata->pin_banks[i];
void __iomem *reg = bank->pctl_base + bank->pctl_offset;
const u8 *offs = bank->type->reg_offset;
const u8 *widths = bank->type->fld_width;
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h
index 3cf758df7d69..fcc57c244d16 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.h
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.h
@@ -285,8 +285,8 @@ struct samsung_pin_ctrl {
int (*eint_gpio_init)(struct samsung_pinctrl_drv_data *);
int (*eint_wkup_init)(struct samsung_pinctrl_drv_data *);
void (*pud_value_init)(struct samsung_pinctrl_drv_data *drvdata);
- void (*suspend)(struct samsung_pinctrl_drv_data *);
- void (*resume)(struct samsung_pinctrl_drv_data *);
+ void (*suspend)(struct samsung_pin_bank *bank);
+ void (*resume)(struct samsung_pin_bank *bank);
};
/**
@@ -335,8 +335,8 @@ struct samsung_pinctrl_drv_data {
struct samsung_retention_ctrl *retention_ctrl;
- void (*suspend)(struct samsung_pinctrl_drv_data *);
- void (*resume)(struct samsung_pinctrl_drv_data *);
+ void (*suspend)(struct samsung_pin_bank *bank);
+ void (*resume)(struct samsung_pin_bank *bank);
};
/**
diff --git a/drivers/pinctrl/spacemit/pinctrl-k1.c b/drivers/pinctrl/spacemit/pinctrl-k1.c
index 67e867b04a02..9996b1c4a07e 100644
--- a/drivers/pinctrl/spacemit/pinctrl-k1.c
+++ b/drivers/pinctrl/spacemit/pinctrl-k1.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2024 Yixun Lan <dlan@gentoo.org> */
#include <linux/bits.h>
+#include <linux/clk.h>
#include <linux/cleanup.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -721,6 +722,7 @@ static int spacemit_pinctrl_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct spacemit_pinctrl *pctrl;
+ struct clk *func_clk, *bus_clk;
const struct spacemit_pinctrl_data *pctrl_data;
int ret;
@@ -739,6 +741,14 @@ static int spacemit_pinctrl_probe(struct platform_device *pdev)
if (IS_ERR(pctrl->regs))
return PTR_ERR(pctrl->regs);
+ func_clk = devm_clk_get_enabled(dev, "func");
+ if (IS_ERR(func_clk))
+ return dev_err_probe(dev, PTR_ERR(func_clk), "failed to get func clock\n");
+
+ bus_clk = devm_clk_get_enabled(dev, "bus");
+ if (IS_ERR(bus_clk))
+ return dev_err_probe(dev, PTR_ERR(bus_clk), "failed to get bus clock\n");
+
pctrl->pdesc.name = dev_name(dev);
pctrl->pdesc.pins = pctrl_data->pins;
pctrl->pdesc.npins = pctrl_data->npins;
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index cc0b4d1d7cff..ba49d48c3a1d 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -228,11 +228,14 @@ static int stm32_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(readl_relaxed(bank->base + STM32_GPIO_IDR) & BIT(offset));
}
-static void stm32_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int stm32_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct stm32_gpio_bank *bank = gpiochip_get_data(chip);
__stm32_gpio_set(bank, offset, value);
+
+ return 0;
}
static int stm32_gpio_direction_output(struct gpio_chip *chip,
@@ -308,7 +311,7 @@ static const struct gpio_chip stm32_gpio_template = {
.request = stm32_gpio_request,
.free = pinctrl_gpio_free,
.get = stm32_gpio_get,
- .set = stm32_gpio_set,
+ .set_rv = stm32_gpio_set,
.direction_input = pinctrl_gpio_direction_input,
.direction_output = stm32_gpio_direction_output,
.to_irq = stm32_gpio_to_irq,
diff --git a/drivers/pinctrl/uniphier/Kconfig b/drivers/pinctrl/uniphier/Kconfig
index b71c07d84662..5e3de0df756b 100644
--- a/drivers/pinctrl/uniphier/Kconfig
+++ b/drivers/pinctrl/uniphier/Kconfig
@@ -3,7 +3,7 @@ menuconfig PINCTRL_UNIPHIER
bool "UniPhier SoC pinctrl drivers"
depends on ARCH_UNIPHIER || COMPILE_TEST
depends on OF && MFD_SYSCON
- default y
+ default ARCH_UNIPHIER
select PINMUX
select GENERIC_PINCONF
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 22074e81bd38..dc2265ebb11b 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -312,15 +312,13 @@ static void zfcp_print_sl(struct seq_file *m, struct service_level *sl)
static int zfcp_setup_adapter_work_queue(struct zfcp_adapter *adapter)
{
- char name[TASK_COMM_LEN];
-
- snprintf(name, sizeof(name), "zfcp_q_%s",
- dev_name(&adapter->ccw_device->dev));
- adapter->work_queue = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
+ adapter->work_queue =
+ alloc_ordered_workqueue("zfcp_q_%s", WQ_MEM_RECLAIM,
+ dev_name(&adapter->ccw_device->dev));
+ if (!adapter->work_queue)
+ return -ENOMEM;
- if (adapter->work_queue)
- return 0;
- return -ENOMEM;
+ return 0;
}
static void zfcp_destroy_adapter_work_queue(struct zfcp_adapter *adapter)
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 8dc6be9a00c1..96b335c92603 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -83,65 +83,6 @@
/*#define DC395x_NO_SYNC*/
/*#define DC395x_NO_WIDE*/
-/*---------------------------------------------------------------------------
- Debugging
- ---------------------------------------------------------------------------*/
-/*
- * Types of debugging that can be enabled and disabled
- */
-#define DBG_KG 0x0001
-#define DBG_0 0x0002
-#define DBG_1 0x0004
-#define DBG_SG 0x0020
-#define DBG_FIFO 0x0040
-#define DBG_PIO 0x0080
-
-
-/*
- * Set set of things to output debugging for.
- * Undefine to remove all debugging
- */
-/*#define DEBUG_MASK (DBG_0|DBG_1|DBG_SG|DBG_FIFO|DBG_PIO)*/
-/*#define DEBUG_MASK DBG_0*/
-
-
-/*
- * Output a kernel mesage at the specified level and append the
- * driver name and a ": " to the start of the message
- */
-#define dprintkl(level, format, arg...) \
- printk(level DC395X_NAME ": " format , ## arg)
-
-
-#ifdef DEBUG_MASK
-/*
- * print a debug message - this is formated with KERN_DEBUG, then the
- * driver name followed by a ": " and then the message is output.
- * This also checks that the specified debug level is enabled before
- * outputing the message
- */
-#define dprintkdbg(type, format, arg...) \
- do { \
- if ((type) & (DEBUG_MASK)) \
- dprintkl(KERN_DEBUG , format , ## arg); \
- } while (0)
-
-/*
- * Check if the specified type of debugging is enabled
- */
-#define debug_enabled(type) ((DEBUG_MASK) & (type))
-
-#else
-/*
- * No debugging. Do nothing
- */
-#define dprintkdbg(type, format, arg...) \
- do {} while (0)
-#define debug_enabled(type) (0)
-
-#endif
-
-
#ifndef PCI_VENDOR_ID_TEKRAM
#define PCI_VENDOR_ID_TEKRAM 0x1DE1 /* Vendor ID */
#endif
@@ -432,7 +373,6 @@ static void *dc395x_scsi_phase1[] = {
/* real period:48ns,76ns,100ns,124ns,148ns,176ns,200ns,248ns */
static u8 clock_period[] = { 12, 18, 25, 31, 37, 43, 50, 62 };
-static u16 clock_speed[] = { 200, 133, 100, 80, 67, 58, 50, 40 };
/*---------------------------------------------------------------------------
@@ -564,7 +504,6 @@ static void set_safe_settings(void)
{
int i;
- dprintkl(KERN_INFO, "Using safe settings.\n");
for (i = 0; i < CFG_NUM; i++)
{
cfg_data[i].value = cfg_data[i].safe;
@@ -581,15 +520,6 @@ static void fix_settings(void)
{
int i;
- dprintkdbg(DBG_1,
- "setup: AdapterId=%08x MaxSpeed=%08x DevMode=%08x "
- "AdapterMode=%08x Tags=%08x ResetDelay=%08x\n",
- cfg_data[CFG_ADAPTER_ID].value,
- cfg_data[CFG_MAX_SPEED].value,
- cfg_data[CFG_DEV_MODE].value,
- cfg_data[CFG_ADAPTER_MODE].value,
- cfg_data[CFG_TAGS].value,
- cfg_data[CFG_RESET_DELAY].value);
for (i = 0; i < CFG_NUM; i++)
{
if (cfg_data[i].value < cfg_data[i].min
@@ -822,8 +752,6 @@ static void waiting_timeout(struct timer_list *t)
{
unsigned long flags;
struct AdapterCtlBlk *acb = from_timer(acb, t, waiting_timer);
- dprintkdbg(DBG_1,
- "waiting_timeout: Queue woken up by timer. acb=%p\n", acb);
DC395x_LOCK_IO(acb->scsi_host, flags);
waiting_process_next(acb);
DC395x_UNLOCK_IO(acb->scsi_host, flags);
@@ -864,8 +792,6 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
{
int nseg;
enum dma_data_direction dir = cmd->sc_data_direction;
- dprintkdbg(DBG_0, "build_srb: (0x%p) <%02i-%i>\n",
- cmd, dcb->target_id, dcb->target_lun);
srb->dcb = dcb;
srb->cmd = cmd;
@@ -887,12 +813,7 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
nseg = scsi_dma_map(cmd);
BUG_ON(nseg < 0);
- if (dir == DMA_NONE || !nseg) {
- dprintkdbg(DBG_0,
- "build_srb: [0] len=%d buf=%p use_sg=%d !MAP=%08x\n",
- cmd->bufflen, scsi_sglist(cmd), scsi_sg_count(cmd),
- srb->segment_x[0].address);
- } else {
+ if (!(dir == DMA_NONE || !nseg)) {
int i;
u32 reqlen = scsi_bufflen(cmd);
struct scatterlist *sg;
@@ -900,11 +821,6 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
srb->sg_count = nseg;
- dprintkdbg(DBG_0,
- "build_srb: [n] len=%d buf=%p use_sg=%d segs=%d\n",
- reqlen, scsi_sglist(cmd), scsi_sg_count(cmd),
- srb->sg_count);
-
scsi_for_each_sg(cmd, sg, srb->sg_count, i) {
u32 busaddr = (u32)sg_dma_address(sg);
u32 seglen = (u32)sg->length;
@@ -933,8 +849,6 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
srb->sg_bus_addr = dma_map_single(&dcb->acb->dev->dev,
srb->segment_x, SEGMENTX_LEN, DMA_TO_DEVICE);
- dprintkdbg(DBG_SG, "build_srb: [n] map sg %p->%08x(%05x)\n",
- srb->segment_x, srb->sg_bus_addr, SEGMENTX_LEN);
}
srb->request_length = srb->total_xfer_length;
@@ -966,8 +880,6 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd)
struct ScsiReqBlk *srb;
struct AdapterCtlBlk *acb =
(struct AdapterCtlBlk *)cmd->device->host->hostdata;
- dprintkdbg(DBG_0, "queue_command: (0x%p) <%02i-%i> cmnd=0x%02x\n",
- cmd, cmd->device->id, (u8)cmd->device->lun, cmd->cmnd[0]);
/* Assume BAD_TARGET; will be cleared later */
set_host_byte(cmd, DID_BAD_TARGET);
@@ -975,37 +887,26 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd)
/* ignore invalid targets */
if (cmd->device->id >= acb->scsi_host->max_id ||
cmd->device->lun >= acb->scsi_host->max_lun ||
- cmd->device->lun >31) {
+ cmd->device->lun > 31)
goto complete;
- }
/* does the specified lun on the specified device exist */
- if (!(acb->dcb_map[cmd->device->id] & (1 << cmd->device->lun))) {
- dprintkl(KERN_INFO, "queue_command: Ignore target <%02i-%i>\n",
- cmd->device->id, (u8)cmd->device->lun);
+ if (!(acb->dcb_map[cmd->device->id] & (1 << cmd->device->lun)))
goto complete;
- }
/* do we have a DCB for the device */
dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
- if (!dcb) {
- /* should never happen */
- dprintkl(KERN_ERR, "queue_command: No such device <%02i-%i>",
- cmd->device->id, (u8)cmd->device->lun);
+ if (!dcb)
goto complete;
- }
set_host_byte(cmd, DID_OK);
set_status_byte(cmd, SAM_STAT_GOOD);
srb = list_first_entry_or_null(&acb->srb_free_list,
- struct ScsiReqBlk, list);
+ struct ScsiReqBlk, list);
+
if (!srb) {
- /*
- * Return 1 since we are unable to queue this command at this
- * point in time.
- */
- dprintkdbg(DBG_0, "queue_command: No free srb's\n");
+ /* should never happen */
return 1;
}
list_del(&srb->list);
@@ -1020,7 +921,6 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd)
/* process immediately */
send_srb(acb, srb);
}
- dprintkdbg(DBG_1, "queue_command: (0x%p) done\n", cmd);
return 0;
complete:
@@ -1036,82 +936,8 @@ complete:
static DEF_SCSI_QCMD(dc395x_queue_command)
-static void dump_register_info(struct AdapterCtlBlk *acb,
- struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
-{
- u16 pstat;
- struct pci_dev *dev = acb->dev;
- pci_read_config_word(dev, PCI_STATUS, &pstat);
- if (!dcb)
- dcb = acb->active_dcb;
- if (!srb && dcb)
- srb = dcb->active_srb;
- if (srb) {
- if (!srb->cmd)
- dprintkl(KERN_INFO, "dump: srb=%p cmd=%p OOOPS!\n",
- srb, srb->cmd);
- else
- dprintkl(KERN_INFO, "dump: srb=%p cmd=%p "
- "cmnd=0x%02x <%02i-%i>\n",
- srb, srb->cmd,
- srb->cmd->cmnd[0], srb->cmd->device->id,
- (u8)srb->cmd->device->lun);
- printk(" sglist=%p cnt=%i idx=%i len=%zu\n",
- srb->segment_x, srb->sg_count, srb->sg_index,
- srb->total_xfer_length);
- printk(" state=0x%04x status=0x%02x phase=0x%02x (%sconn.)\n",
- srb->state, srb->status, srb->scsi_phase,
- (acb->active_dcb) ? "" : "not");
- }
- dprintkl(KERN_INFO, "dump: SCSI{status=0x%04x fifocnt=0x%02x "
- "signals=0x%02x irqstat=0x%02x sync=0x%02x target=0x%02x "
- "rselid=0x%02x ctr=0x%08x irqen=0x%02x config=0x%04x "
- "config2=0x%02x cmd=0x%02x selto=0x%02x}\n",
- DC395x_read16(acb, TRM_S1040_SCSI_STATUS),
- DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
- DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL),
- DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS),
- DC395x_read8(acb, TRM_S1040_SCSI_SYNC),
- DC395x_read8(acb, TRM_S1040_SCSI_TARGETID),
- DC395x_read8(acb, TRM_S1040_SCSI_IDMSG),
- DC395x_read32(acb, TRM_S1040_SCSI_COUNTER),
- DC395x_read8(acb, TRM_S1040_SCSI_INTEN),
- DC395x_read16(acb, TRM_S1040_SCSI_CONFIG0),
- DC395x_read8(acb, TRM_S1040_SCSI_CONFIG2),
- DC395x_read8(acb, TRM_S1040_SCSI_COMMAND),
- DC395x_read8(acb, TRM_S1040_SCSI_TIMEOUT));
- dprintkl(KERN_INFO, "dump: DMA{cmd=0x%04x fifocnt=0x%02x fstat=0x%02x "
- "irqstat=0x%02x irqen=0x%02x cfg=0x%04x tctr=0x%08x "
- "ctctr=0x%08x addr=0x%08x:0x%08x}\n",
- DC395x_read16(acb, TRM_S1040_DMA_COMMAND),
- DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
- DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
- DC395x_read8(acb, TRM_S1040_DMA_STATUS),
- DC395x_read8(acb, TRM_S1040_DMA_INTEN),
- DC395x_read16(acb, TRM_S1040_DMA_CONFIG),
- DC395x_read32(acb, TRM_S1040_DMA_XCNT),
- DC395x_read32(acb, TRM_S1040_DMA_CXCNT),
- DC395x_read32(acb, TRM_S1040_DMA_XHIGHADDR),
- DC395x_read32(acb, TRM_S1040_DMA_XLOWADDR));
- dprintkl(KERN_INFO, "dump: gen{gctrl=0x%02x gstat=0x%02x gtmr=0x%02x} "
- "pci{status=0x%04x}\n",
- DC395x_read8(acb, TRM_S1040_GEN_CONTROL),
- DC395x_read8(acb, TRM_S1040_GEN_STATUS),
- DC395x_read8(acb, TRM_S1040_GEN_TIMER),
- pstat);
-}
-
-
static inline void clear_fifo(struct AdapterCtlBlk *acb, char *txt)
{
-#if debug_enabled(DBG_FIFO)
- u8 lines = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
- u8 fifocnt = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT);
- if (!(fifocnt & 0x40))
- dprintkdbg(DBG_FIFO,
- "clear_fifo: (%i bytes) on phase %02x in %s\n",
- fifocnt & 0x3f, lines, txt);
-#endif
DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRFIFO);
}
@@ -1120,7 +946,6 @@ static void reset_dev_param(struct AdapterCtlBlk *acb)
{
struct DeviceCtlBlk *dcb;
struct NvRamType *eeprom = &acb->eeprom;
- dprintkdbg(DBG_0, "reset_dev_param: acb=%p\n", acb);
list_for_each_entry(dcb, &acb->dcb_list, list) {
u8 period_index;
@@ -1148,9 +973,6 @@ static int __dc395x_eh_bus_reset(struct scsi_cmnd *cmd)
{
struct AdapterCtlBlk *acb =
(struct AdapterCtlBlk *)cmd->device->host->hostdata;
- dprintkl(KERN_INFO,
- "eh_bus_reset: (0%p) target=<%02i-%i> cmd=%p\n",
- cmd, cmd->device->id, (u8)cmd->device->lun, cmd);
if (timer_pending(&acb->waiting_timer))
timer_delete(&acb->waiting_timer);
@@ -1216,14 +1038,10 @@ static int dc395x_eh_abort(struct scsi_cmnd *cmd)
(struct AdapterCtlBlk *)cmd->device->host->hostdata;
struct DeviceCtlBlk *dcb;
struct ScsiReqBlk *srb;
- dprintkl(KERN_INFO, "eh_abort: (0x%p) target=<%02i-%i> cmd=%p\n",
- cmd, cmd->device->id, (u8)cmd->device->lun, cmd);
dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
- if (!dcb) {
- dprintkl(KERN_DEBUG, "eh_abort: No such device\n");
+ if (!dcb)
return FAILED;
- }
srb = find_cmd(cmd, &dcb->srb_waiting_list);
if (srb) {
@@ -1232,16 +1050,12 @@ static int dc395x_eh_abort(struct scsi_cmnd *cmd)
pci_unmap_srb(acb, srb);
free_tag(dcb, srb);
list_add_tail(&srb->list, &acb->srb_free_list);
- dprintkl(KERN_DEBUG, "eh_abort: Command was waiting\n");
set_host_byte(cmd, DID_ABORT);
return SUCCESS;
}
srb = find_cmd(cmd, &dcb->srb_going_list);
if (srb) {
- dprintkl(KERN_DEBUG, "eh_abort: Command in progress\n");
/* XXX: Should abort the command here */
- } else {
- dprintkl(KERN_DEBUG, "eh_abort: Command not found\n");
}
return FAILED;
}
@@ -1253,10 +1067,6 @@ static void build_sdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
{
u8 *ptr = srb->msgout_buf + srb->msg_count;
if (srb->msg_count > 1) {
- dprintkl(KERN_INFO,
- "build_sdtr: msgout_buf BUSY (%i: %02x %02x)\n",
- srb->msg_count, srb->msgout_buf[0],
- srb->msgout_buf[1]);
return;
}
if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO)) {
@@ -1278,13 +1088,9 @@ static void build_wdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
u8 wide = ((dcb->dev_mode & NTC_DO_WIDE_NEGO) &
(acb->config & HCC_WIDE_CARD)) ? 1 : 0;
u8 *ptr = srb->msgout_buf + srb->msg_count;
- if (srb->msg_count > 1) {
- dprintkl(KERN_INFO,
- "build_wdtr: msgout_buf BUSY (%i: %02x %02x)\n",
- srb->msg_count, srb->msgout_buf[0],
- srb->msgout_buf[1]);
+ if (srb->msg_count > 1)
return;
- }
+
srb->msg_count += spi_populate_width_msg(ptr, wide);
srb->state |= SRB_DO_WIDE_NEGO;
}
@@ -1316,11 +1122,9 @@ void selection_timeout_missed(unsigned long ptr)
unsigned long flags;
struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)ptr;
struct ScsiReqBlk *srb;
- dprintkl(KERN_DEBUG, "Chip forgot to produce SelTO IRQ!\n");
- if (!acb->active_dcb || !acb->active_dcb->active_srb) {
- dprintkl(KERN_DEBUG, "... but no cmd pending? Oops!\n");
+ if (!acb->active_dcb || !acb->active_dcb->active_srb)
return;
- }
+
DC395x_LOCK_IO(acb->scsi_host, flags);
srb = acb->active_dcb->active_srb;
disconnect(acb);
@@ -1335,8 +1139,6 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
u16 __maybe_unused s_stat2, return_code;
u8 s_stat, scsicommand, i, identify_message;
u8 *ptr;
- dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> srb=%p\n",
- dcb->target_id, dcb->target_lun, srb);
srb->tag_number = TAG_NONE; /* acb->tag_max_num: had error read in eeprom */
@@ -1345,8 +1147,6 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
s_stat2 = DC395x_read16(acb, TRM_S1040_SCSI_STATUS);
#if 1
if (s_stat & 0x20 /* s_stat2 & 0x02000 */ ) {
- dprintkdbg(DBG_KG, "start_scsi: (0x%p) BUSY %02x %04x\n",
- s_stat, s_stat2);
/*
* Try anyway?
*
@@ -1361,24 +1161,16 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
return 1;
}
#endif
- if (acb->active_dcb) {
- dprintkl(KERN_DEBUG, "start_scsi: (0x%p) Attempt to start a"
- "command while another command (0x%p) is active.",
- srb->cmd,
- acb->active_dcb->active_srb ?
- acb->active_dcb->active_srb->cmd : NULL);
+ if (acb->active_dcb)
return 1;
- }
- if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) {
- dprintkdbg(DBG_KG, "start_scsi: (0x%p) Failed (busy)\n", srb->cmd);
+
+ if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT)
return 1;
- }
+
/* Allow starting of SCSI commands half a second before we allow the mid-level
* to queue them again after a reset */
- if (time_before(jiffies, acb->last_reset - HZ / 2)) {
- dprintkdbg(DBG_KG, "start_scsi: Refuse cmds (reset wait)\n");
+ if (time_before(jiffies, acb->last_reset - HZ / 2))
return 1;
- }
/* Flush FIFO */
clear_fifo(acb, "start_scsi");
@@ -1442,10 +1234,6 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
tag_number++;
}
if (tag_number >= dcb->max_command) {
- dprintkl(KERN_WARNING, "start_scsi: (0x%p) "
- "Out of tags target=<%02i-%i>)\n",
- srb->cmd, srb->cmd->device->id,
- (u8)srb->cmd->device->lun);
srb->state = SRB_READY;
DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
DO_HWRESELECT);
@@ -1462,9 +1250,6 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
#endif
/*polling:*/
/* Send CDB ..command block ......... */
- dprintkdbg(DBG_KG, "start_scsi: (0x%p) <%02i-%i> cmnd=0x%02x tag=%i\n",
- srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun,
- srb->cmd->cmnd[0], srb->tag_number);
if (srb->flag & AUTO_REQSENSE) {
DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE);
DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
@@ -1486,8 +1271,6 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
* we caught an interrupt (must be reset or reselection ... )
* : Let's process it first!
*/
- dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> Failed - busy\n",
- srb->cmd, dcb->target_id, dcb->target_lun);
srb->state = SRB_READY;
free_tag(dcb, srb);
srb->msg_count = 0;
@@ -1551,14 +1334,6 @@ static void dc395x_handle_interrupt(struct AdapterCtlBlk *acb,
/* This acknowledges the IRQ */
scsi_intstatus = DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
- if ((scsi_status & 0x2007) == 0x2002)
- dprintkl(KERN_DEBUG,
- "COP after COP completed? %04x\n", scsi_status);
- if (debug_enabled(DBG_KG)) {
- if (scsi_intstatus & INT_SELTIMEOUT)
- dprintkdbg(DBG_KG, "handle_interrupt: Selection timeout\n");
- }
- /*dprintkl(KERN_DEBUG, "handle_interrupt: intstatus = 0x%02x ", scsi_intstatus); */
if (timer_pending(&acb->selto_timer))
timer_delete(&acb->selto_timer);
@@ -1571,27 +1346,21 @@ static void dc395x_handle_interrupt(struct AdapterCtlBlk *acb,
reselect(acb);
goto out_unlock;
}
- if (scsi_intstatus & INT_SELECT) {
- dprintkl(KERN_INFO, "Host does not support target mode!\n");
+ if (scsi_intstatus & INT_SELECT)
goto out_unlock;
- }
+
if (scsi_intstatus & INT_SCSIRESET) {
scsi_reset_detect(acb);
goto out_unlock;
}
if (scsi_intstatus & (INT_BUSSERVICE | INT_CMDDONE)) {
dcb = acb->active_dcb;
- if (!dcb) {
- dprintkl(KERN_DEBUG,
- "Oops: BusService (%04x %02x) w/o ActiveDCB!\n",
- scsi_status, scsi_intstatus);
+ if (!dcb)
goto out_unlock;
- }
+
srb = dcb->active_srb;
- if (dcb->flag & ABORT_DEV_) {
- dprintkdbg(DBG_0, "MsgOut Abort Device.....\n");
+ if (dcb->flag & ABORT_DEV_)
enable_msgout_abort(acb, srb);
- }
/* software sequential machine */
phase = (u16)srb->scsi_phase;
@@ -1659,9 +1428,7 @@ static irqreturn_t dc395x_interrupt(int irq, void *dev_id)
}
else if (dma_status & 0x20) {
/* Error from the DMA engine */
- dprintkl(KERN_INFO, "Interrupt from DMA engine: 0x%02x!\n", dma_status);
#if 0
- dprintkl(KERN_INFO, "This means DMA error! Try to handle ...\n");
if (acb->active_dcb) {
acb->active_dcb-> flag |= ABORT_DEV_;
if (acb->active_dcb->active_srb)
@@ -1669,7 +1436,6 @@ static irqreturn_t dc395x_interrupt(int irq, void *dev_id)
}
DC395x_write8(acb, TRM_S1040_DMA_CONTROL, ABORTXFER | CLRXFIFO);
#else
- dprintkl(KERN_INFO, "Ignoring DMA error (probably a bad thing) ...\n");
acb = NULL;
#endif
handled = IRQ_HANDLED;
@@ -1682,7 +1448,6 @@ static irqreturn_t dc395x_interrupt(int irq, void *dev_id)
static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
u16 *pscsi_status)
{
- dprintkdbg(DBG_0, "msgout_phase0: (0x%p)\n", srb->cmd);
if (srb->state & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT))
*pscsi_status = PH_BUS_FREE; /*.. initial phase */
@@ -1696,18 +1461,12 @@ static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
{
u16 i;
u8 *ptr;
- dprintkdbg(DBG_0, "msgout_phase1: (0x%p)\n", srb->cmd);
clear_fifo(acb, "msgout_phase1");
- if (!(srb->state & SRB_MSGOUT)) {
+ if (!(srb->state & SRB_MSGOUT))
srb->state |= SRB_MSGOUT;
- dprintkl(KERN_DEBUG,
- "msgout_phase1: (0x%p) Phase unexpected\n",
- srb->cmd); /* So what ? */
- }
+
if (!srb->msg_count) {
- dprintkdbg(DBG_0, "msgout_phase1: (0x%p) NOP msg\n",
- srb->cmd);
DC395x_write8(acb, TRM_S1040_SCSI_FIFO, NOP);
DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
/* it's important for atn stop */
@@ -1728,7 +1487,6 @@ static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
u16 *pscsi_status)
{
- dprintkdbg(DBG_0, "command_phase0: (0x%p)\n", srb->cmd);
DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
}
@@ -1739,7 +1497,6 @@ static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
struct DeviceCtlBlk *dcb;
u8 *ptr;
u16 i;
- dprintkdbg(DBG_0, "command_phase1: (0x%p)\n", srb->cmd);
clear_fifo(acb, "command_phase1");
DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRATN);
@@ -1768,26 +1525,6 @@ static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
/*
- * Verify that the remaining space in the hw sg lists is the same as
- * the count of remaining bytes in srb->total_xfer_length
- */
-static void sg_verify_length(struct ScsiReqBlk *srb)
-{
- if (debug_enabled(DBG_SG)) {
- unsigned len = 0;
- unsigned idx = srb->sg_index;
- struct SGentry *psge = srb->segment_x + idx;
- for (; idx < srb->sg_count; psge++, idx++)
- len += psge->length;
- if (len != srb->total_xfer_length)
- dprintkdbg(DBG_SG,
- "Inconsistent SRB S/G lengths (Tot=%i, Count=%i) !!\n",
- srb->total_xfer_length, len);
- }
-}
-
-
-/*
* Compute the next Scatter Gather list index and adjust its length
* and address if necessary
*/
@@ -1797,15 +1534,11 @@ static void sg_update_list(struct ScsiReqBlk *srb, u32 left)
u32 xferred = srb->total_xfer_length - left; /* bytes transferred */
struct SGentry *psge = srb->segment_x + srb->sg_index;
- dprintkdbg(DBG_0,
- "sg_update_list: Transferred %i of %i bytes, %i remain\n",
- xferred, srb->total_xfer_length, left);
if (xferred == 0) {
/* nothing to update since we did not transfer any data */
return;
}
- sg_verify_length(srb);
srb->total_xfer_length = left; /* update remaining count */
for (idx = srb->sg_index; idx < srb->sg_count; idx++) {
if (xferred >= psge->length) {
@@ -1826,7 +1559,6 @@ static void sg_update_list(struct ScsiReqBlk *srb, u32 left)
}
psge++;
}
- sg_verify_length(srb);
}
@@ -1882,8 +1614,6 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
struct DeviceCtlBlk *dcb = srb->dcb;
u16 scsi_status = *pscsi_status;
u32 d_left_counter = 0;
- dprintkdbg(DBG_0, "data_out_phase0: (0x%p) <%02i-%i>\n",
- srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
/*
* KG: We need to drain the buffers before we draw any conclusions!
@@ -1897,14 +1627,6 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
* KG: Stop DMA engine pushing more data into the SCSI FIFO
* If we need more data, the DMA SG list will be freshly set up, anyway
*/
- dprintkdbg(DBG_PIO, "data_out_phase0: "
- "DMA{fifocnt=0x%02x fifostat=0x%02x} "
- "SCSI{fifocnt=0x%02x cnt=0x%06x status=0x%04x} total=0x%06x\n",
- DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
- DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
- DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
- DC395x_read32(acb, TRM_S1040_SCSI_COUNTER), scsi_status,
- srb->total_xfer_length);
DC395x_write8(acb, TRM_S1040_DMA_CONTROL, STOPDMAXFER | CLRXFIFO);
if (!(srb->state & SRB_XFERPAD)) {
@@ -1928,16 +1650,6 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
if (dcb->sync_period & WIDE_SYNC)
d_left_counter <<= 1;
- dprintkdbg(DBG_KG, "data_out_phase0: FIFO contains %i %s\n"
- "SCSI{fifocnt=0x%02x cnt=0x%08x} "
- "DMA{fifocnt=0x%04x cnt=0x%02x ctr=0x%08x}\n",
- DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
- (dcb->sync_period & WIDE_SYNC) ? "words" : "bytes",
- DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
- DC395x_read32(acb, TRM_S1040_SCSI_COUNTER),
- DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
- DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
- DC395x_read32(acb, TRM_S1040_DMA_CXCNT));
}
/*
* calculate all the residue data that not yet tranfered
@@ -1958,9 +1670,6 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
if (d_left_counter == 1 && dcb->sync_period & WIDE_SYNC
&& scsi_bufflen(srb->cmd) % 2) {
d_left_counter = 0;
- dprintkl(KERN_INFO,
- "data_out_phase0: Discard 1 byte (0x%02x)\n",
- scsi_status);
}
/*
* KG: Oops again. Same thinko as above: The SCSI might have been
@@ -1991,8 +1700,6 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
|| ((oldxferred & ~PAGE_MASK) ==
(PAGE_SIZE - diff))
) {
- dprintkl(KERN_INFO, "data_out_phase0: "
- "Work around chip bug (%i)?\n", diff);
d_left_counter =
srb->total_xfer_length - diff;
sg_update_list(srb, d_left_counter);
@@ -2003,17 +1710,14 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
}
}
}
- if ((*pscsi_status & PHASEMASK) != PH_DATA_OUT) {
+ if ((*pscsi_status & PHASEMASK) != PH_DATA_OUT)
cleanup_after_transfer(acb, srb);
- }
}
static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
u16 *pscsi_status)
{
- dprintkdbg(DBG_0, "data_out_phase1: (0x%p) <%02i-%i>\n",
- srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
clear_fifo(acb, "data_out_phase1");
/* do prepare before transfer when data out phase */
data_io_transfer(acb, srb, XFERDATAOUT);
@@ -2024,8 +1728,6 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
{
u16 scsi_status = *pscsi_status;
- dprintkdbg(DBG_0, "data_in_phase0: (0x%p) <%02i-%i>\n",
- srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
/*
* KG: DataIn is much more tricky than DataOut. When the device is finished
@@ -2045,8 +1747,6 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
unsigned int sc, fc;
if (scsi_status & PARITYERROR) {
- dprintkl(KERN_INFO, "data_in_phase0: (0x%p) "
- "Parity Error\n", srb->cmd);
srb->status |= PARITY_ERROR;
}
/*
@@ -2058,26 +1758,14 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80)) {
#if 0
int ctr = 6000000;
- dprintkl(KERN_DEBUG,
- "DIP0: Wait for DMA FIFO to flush ...\n");
/*DC395x_write8 (TRM_S1040_DMA_CONTROL, STOPDMAXFER); */
/*DC395x_write32 (TRM_S1040_SCSI_COUNTER, 7); */
/*DC395x_write8 (TRM_S1040_SCSI_COMMAND, SCMD_DMA_IN); */
while (!
(DC395x_read16(acb, TRM_S1040_DMA_FIFOSTAT) &
0x80) && --ctr);
- if (ctr < 6000000 - 1)
- dprintkl(KERN_DEBUG
- "DIP0: Had to wait for DMA ...\n");
- if (!ctr)
- dprintkl(KERN_ERR,
- "Deadlock in DIP0 waiting for DMA FIFO empty!!\n");
/*DC395x_write32 (TRM_S1040_SCSI_COUNTER, 0); */
#endif
- dprintkdbg(DBG_KG, "data_in_phase0: "
- "DMA{fifocnt=0x%02x fifostat=0x%02x}\n",
- DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
- DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT));
}
/* Now: Check remaining data: The SCSI counters should tell us ... */
sc = DC395x_read32(acb, TRM_S1040_SCSI_COUNTER);
@@ -2085,17 +1773,6 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
d_left_counter = sc + ((fc & 0x1f)
<< ((srb->dcb->sync_period & WIDE_SYNC) ? 1 :
0));
- dprintkdbg(DBG_KG, "data_in_phase0: "
- "SCSI{fifocnt=0x%02x%s ctr=0x%08x} "
- "DMA{fifocnt=0x%02x fifostat=0x%02x ctr=0x%08x} "
- "Remain{totxfer=%i scsi_fifo+ctr=%i}\n",
- fc,
- (srb->dcb->sync_period & WIDE_SYNC) ? "words" : "bytes",
- sc,
- fc,
- DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
- DC395x_read32(acb, TRM_S1040_DMA_CXCNT),
- srb->total_xfer_length, d_left_counter);
#if DC395x_LASTPIO
/* KG: Less than or equal to 4 bytes cannot be transferred via DMA, it seems. */
if (d_left_counter
@@ -2104,12 +1781,6 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
/*u32 addr = (srb->segment_x[srb->sg_index].address); */
/*sg_update_list (srb, d_left_counter); */
- dprintkdbg(DBG_PIO, "data_in_phase0: PIO (%i %s) "
- "for remaining %i bytes:",
- fc & 0x1f,
- (srb->dcb->sync_period & WIDE_SYNC) ?
- "words" : "bytes",
- srb->total_xfer_length);
if (srb->dcb->sync_period & WIDE_SYNC)
DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
CFG2_WIDEFIFO);
@@ -2133,9 +1804,6 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
byte = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
*virt++ = byte;
- if (debug_enabled(DBG_PIO))
- printk(" %02x", byte);
-
d_left_counter--;
sg_subtract_one(srb);
@@ -2158,8 +1826,6 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
*virt++ = byte;
srb->total_xfer_length--;
- if (debug_enabled(DBG_PIO))
- printk(" %02x", byte);
}
DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
@@ -2168,10 +1834,7 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
scsi_kunmap_atomic_sg(base);
local_irq_restore(flags);
}
- /*printk(" %08x", *(u32*)(bus_to_virt (addr))); */
/*srb->total_xfer_length = 0; */
- if (debug_enabled(DBG_PIO))
- printk("\n");
}
#endif /* DC395x_LASTPIO */
@@ -2207,9 +1870,6 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
TempDMAstatus =
DC395x_read8(acb, TRM_S1040_DMA_STATUS);
} while (!(TempDMAstatus & DMAXFERCOMP) && --ctr);
- if (!ctr)
- dprintkl(KERN_ERR,
- "Deadlock in DataInPhase0 waiting for DMA!!\n");
srb->total_xfer_length = 0;
#endif
srb->total_xfer_length = d_left_counter;
@@ -2226,17 +1886,14 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
}
}
/* KG: The target may decide to disconnect: Empty FIFO before! */
- if ((*pscsi_status & PHASEMASK) != PH_DATA_IN) {
+ if ((*pscsi_status & PHASEMASK) != PH_DATA_IN)
cleanup_after_transfer(acb, srb);
- }
}
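
The residue bookkeeping in data_in_phase0() combines the SCSI transfer counter with whatever is still sitting in the SCSI FIFO, counting FIFO entries as 16-bit words on a wide bus (the driver additionally masks the raw FIFO count to its valid bits first). A rough sketch of that arithmetic, as an illustration rather than the driver's exact expression:

/* Bytes not yet delivered to memory: the SCSI transfer counter plus
 * the FIFO contents; on a wide (16-bit) bus each FIFO entry is two
 * bytes, so the FIFO count is shifted left by one. */
static u32 scsi_residue_bytes(u32 scsi_counter, u32 fifo_entries, bool wide)
{
	return scsi_counter + (fifo_entries << (wide ? 1 : 0));
}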
static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
u16 *pscsi_status)
{
- dprintkdbg(DBG_0, "data_in_phase1: (0x%p) <%02i-%i>\n",
- srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
data_io_transfer(acb, srb, XFERDATAIN);
}
@@ -2246,13 +1903,7 @@ static void data_io_transfer(struct AdapterCtlBlk *acb,
{
struct DeviceCtlBlk *dcb = srb->dcb;
u8 bval;
- dprintkdbg(DBG_0,
- "data_io_transfer: (0x%p) <%02i-%i> %c len=%i, sg=(%i/%i)\n",
- srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun,
- ((io_dir & DMACMD_DIR) ? 'r' : 'w'),
- srb->total_xfer_length, srb->sg_index, srb->sg_count);
- if (srb == acb->tmp_srb)
- dprintkl(KERN_ERR, "data_io_transfer: Using tmp_srb!\n");
+
if (srb->sg_index >= srb->sg_count) {
/* can't happen? out of bounds error */
return;
@@ -2265,9 +1916,6 @@ static void data_io_transfer(struct AdapterCtlBlk *acb,
* Maybe, even ABORTXFER would be appropriate
*/
if (dma_status & XFERPENDING) {
- dprintkl(KERN_DEBUG, "data_io_transfer: Xfer pending! "
- "Expect trouble!\n");
- dump_register_info(acb, dcb, srb);
DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
}
/* clear_fifo(acb, "IO"); */
@@ -2346,9 +1994,6 @@ static void data_io_transfer(struct AdapterCtlBlk *acb,
left_io -= len;
while (len--) {
- if (debug_enabled(DBG_PIO))
- printk(" %02x", *virt);
-
DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *virt++);
sg_subtract_one(srb);
@@ -2360,14 +2005,10 @@ static void data_io_transfer(struct AdapterCtlBlk *acb,
if (srb->dcb->sync_period & WIDE_SYNC) {
if (ln % 2) {
DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
- if (debug_enabled(DBG_PIO))
- printk(" |00");
}
DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
}
/*DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, ln); */
- if (debug_enabled(DBG_PIO))
- printk("\n");
DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
SCMD_FIFO_OUT);
}
@@ -2419,8 +2060,6 @@ static void data_io_transfer(struct AdapterCtlBlk *acb,
static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
u16 *pscsi_status)
{
- dprintkdbg(DBG_0, "status_phase0: (0x%p) <%02i-%i>\n",
- srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
srb->target_status = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
srb->end_message = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); /* get message */
srb->state = SRB_COMPLETED;
@@ -2433,8 +2072,6 @@ static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
u16 *pscsi_status)
{
- dprintkdbg(DBG_0, "status_phase1: (0x%p) <%02i-%i>\n",
- srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
srb->state = SRB_STATUS;
DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */
DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_COMP);
@@ -2464,9 +2101,6 @@ static inline void msgin_reject(struct AdapterCtlBlk *acb,
DC395x_ENABLE_MSGOUT;
srb->state &= ~SRB_MSGIN;
srb->state |= SRB_MSGOUT;
- dprintkl(KERN_INFO, "msgin_reject: 0x%02x <%02i-%i>\n",
- srb->msgin_buf[0],
- srb->dcb->target_id, srb->dcb->target_lun);
}
@@ -2475,13 +2109,6 @@ static struct ScsiReqBlk *msgin_qtag(struct AdapterCtlBlk *acb,
{
struct ScsiReqBlk *srb = NULL;
struct ScsiReqBlk *i;
- dprintkdbg(DBG_0, "msgin_qtag: (0x%p) tag=%i srb=%p\n",
- srb->cmd, tag, srb);
-
- if (!(dcb->tag_mask & (1 << tag)))
- dprintkl(KERN_DEBUG,
- "msgin_qtag: tag_mask=0x%08x does not reserve tag %i!\n",
- dcb->tag_mask, tag);
if (list_empty(&dcb->srb_going_list))
goto mingx0;
@@ -2494,8 +2121,6 @@ static struct ScsiReqBlk *msgin_qtag(struct AdapterCtlBlk *acb,
if (!srb)
goto mingx0;
- dprintkdbg(DBG_0, "msgin_qtag: (0x%p) <%02i-%i>\n",
- srb->cmd, srb->dcb->target_id, srb->dcb->target_lun);
if (dcb->flag & ABORT_DEV_) {
/*srb->state = SRB_ABORT_SENT; */
enable_msgout_abort(acb, srb);
@@ -2518,7 +2143,6 @@ static struct ScsiReqBlk *msgin_qtag(struct AdapterCtlBlk *acb,
srb->msgout_buf[0] = ABORT_TASK;
srb->msg_count = 1;
DC395x_ENABLE_MSGOUT;
- dprintkl(KERN_DEBUG, "msgin_qtag: Unknown tag %i - abort\n", tag);
return srb;
}
@@ -2537,8 +2161,6 @@ static inline void reprogram_regs(struct AdapterCtlBlk *acb,
static void msgin_set_async(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
{
struct DeviceCtlBlk *dcb = srb->dcb;
- dprintkl(KERN_DEBUG, "msgin_set_async: No sync transfers <%02i-%i>\n",
- dcb->target_id, dcb->target_lun);
dcb->sync_mode &= ~(SYNC_NEGO_ENABLE);
dcb->sync_mode |= SYNC_NEGO_DONE;
@@ -2551,7 +2173,6 @@ static void msgin_set_async(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
&& !(dcb->sync_mode & WIDE_NEGO_DONE)) {
build_wdtr(acb, dcb, srb);
DC395x_ENABLE_MSGOUT;
- dprintkdbg(DBG_0, "msgin_set_async(rej): Try WDTR anyway\n");
}
}
@@ -2562,12 +2183,6 @@ static void msgin_set_sync(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
struct DeviceCtlBlk *dcb = srb->dcb;
u8 bval;
int fact;
- dprintkdbg(DBG_1, "msgin_set_sync: <%02i> Sync: %ins "
- "(%02i.%01i MHz) Offset %i\n",
- dcb->target_id, srb->msgin_buf[3] << 2,
- (250 / srb->msgin_buf[3]),
- ((250 % srb->msgin_buf[3]) * 10) / srb->msgin_buf[3],
- srb->msgin_buf[4]);
if (srb->msgin_buf[4] > 15)
srb->msgin_buf[4] = 15;
@@ -2584,10 +2199,7 @@ static void msgin_set_sync(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
|| dcb->min_nego_period >
clock_period[bval]))
bval++;
- if (srb->msgin_buf[3] < clock_period[bval])
- dprintkl(KERN_INFO,
- "msgin_set_sync: Increase sync nego period to %ins\n",
- clock_period[bval] << 2);
+
srb->msgin_buf[3] = clock_period[bval];
dcb->sync_period &= 0xf0;
dcb->sync_period |= ALT_SYNC | bval;
@@ -2598,18 +2210,8 @@ static void msgin_set_sync(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
else
fact = 250;
- dprintkl(KERN_INFO,
- "Target %02i: %s Sync: %ins Offset %i (%02i.%01i MB/s)\n",
- dcb->target_id, (fact == 500) ? "Wide16" : "",
- dcb->min_nego_period << 2, dcb->sync_offset,
- (fact / dcb->min_nego_period),
- ((fact % dcb->min_nego_period) * 10 +
- dcb->min_nego_period / 2) / dcb->min_nego_period);
-
if (!(srb->state & SRB_DO_SYNC_NEGO)) {
/* Reply with corrected SDTR Message */
- dprintkl(KERN_DEBUG, "msgin_set_sync: answer w/%ins %i\n",
- srb->msgin_buf[3] << 2, srb->msgin_buf[4]);
memcpy(srb->msgout_buf, srb->msgin_buf, 5);
srb->msg_count = 5;
@@ -2620,7 +2222,6 @@ static void msgin_set_sync(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
&& !(dcb->sync_mode & WIDE_NEGO_DONE)) {
build_wdtr(acb, dcb, srb);
DC395x_ENABLE_MSGOUT;
- dprintkdbg(DBG_0, "msgin_set_sync: Also try WDTR\n");
}
}
srb->state &= ~SRB_DO_SYNC_NEGO;
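
One of the prints removed above reported the negotiated synchronous rate. For reference, the SDTR transfer period factor is in 4 ns units, so the rate the driver was printing works out as below; this is a hedged sketch of the arithmetic, not driver code:

/* Nominal transfer rate implied by an SDTR period factor (period in
 * 4 ns units): 250 / period MB/s on an 8-bit bus, 500 / period MB/s
 * when a 16-bit wide agreement is also in place. */
static unsigned int sdtr_rate_mbs(unsigned int period_4ns, bool wide)
{
	unsigned int fact = wide ? 500 : 250;

	return period_4ns ? fact / period_4ns : 0;
}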
@@ -2634,7 +2235,6 @@ static inline void msgin_set_nowide(struct AdapterCtlBlk *acb,
struct ScsiReqBlk *srb)
{
struct DeviceCtlBlk *dcb = srb->dcb;
- dprintkdbg(DBG_1, "msgin_set_nowide: <%02i>\n", dcb->target_id);
dcb->sync_period &= ~WIDE_SYNC;
dcb->sync_mode &= ~(WIDE_NEGO_ENABLE);
@@ -2645,7 +2245,6 @@ static inline void msgin_set_nowide(struct AdapterCtlBlk *acb,
&& !(dcb->sync_mode & SYNC_NEGO_DONE)) {
build_sdtr(acb, dcb, srb);
DC395x_ENABLE_MSGOUT;
- dprintkdbg(DBG_0, "msgin_set_nowide: Rejected. Try SDTR anyway\n");
}
}
@@ -2654,15 +2253,11 @@ static void msgin_set_wide(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
struct DeviceCtlBlk *dcb = srb->dcb;
u8 wide = (dcb->dev_mode & NTC_DO_WIDE_NEGO
&& acb->config & HCC_WIDE_CARD) ? 1 : 0;
- dprintkdbg(DBG_1, "msgin_set_wide: <%02i>\n", dcb->target_id);
if (srb->msgin_buf[3] > wide)
srb->msgin_buf[3] = wide;
/* Completed */
if (!(srb->state & SRB_DO_WIDE_NEGO)) {
- dprintkl(KERN_DEBUG,
- "msgin_set_wide: Wide nego initiated <%02i>\n",
- dcb->target_id);
memcpy(srb->msgout_buf, srb->msgin_buf, 4);
srb->msg_count = 4;
srb->state |= SRB_DO_WIDE_NEGO;
@@ -2676,15 +2271,11 @@ static void msgin_set_wide(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
dcb->sync_period &= ~WIDE_SYNC;
srb->state &= ~SRB_DO_WIDE_NEGO;
/*dcb->sync_mode &= ~(WIDE_NEGO_ENABLE+WIDE_NEGO_DONE); */
- dprintkdbg(DBG_1,
- "msgin_set_wide: Wide (%i bit) negotiated <%02i>\n",
- (8 << srb->msgin_buf[3]), dcb->target_id);
reprogram_regs(acb, dcb);
if ((dcb->sync_mode & SYNC_NEGO_ENABLE)
&& !(dcb->sync_mode & SYNC_NEGO_DONE)) {
build_sdtr(acb, dcb, srb);
DC395x_ENABLE_MSGOUT;
- dprintkdbg(DBG_0, "msgin_set_wide: Also try SDTR.\n");
}
}
@@ -2705,7 +2296,6 @@ static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
u16 *pscsi_status)
{
struct DeviceCtlBlk *dcb = acb->active_dcb;
- dprintkdbg(DBG_0, "msgin_phase0: (0x%p)\n", srb->cmd);
srb->msgin_buf[acb->msg_len++] = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
if (msgin_completed(srb->msgin_buf, acb->msg_len)) {
@@ -2759,7 +2349,6 @@ static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
case IGNORE_WIDE_RESIDUE:
/* Discard wide residual */
- dprintkdbg(DBG_0, "msgin_phase0: Ignore Wide Residual!\n");
break;
case COMMAND_COMPLETE:
@@ -2771,20 +2360,12 @@ static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
* SAVE POINTER may be ignored as we have the struct
* ScsiReqBlk* associated with the scsi command.
*/
- dprintkdbg(DBG_0, "msgin_phase0: (0x%p) "
- "SAVE POINTER rem=%i Ignore\n",
- srb->cmd, srb->total_xfer_length);
break;
case RESTORE_POINTERS:
- dprintkdbg(DBG_0, "msgin_phase0: RESTORE POINTER. Ignore\n");
break;
case ABORT:
- dprintkdbg(DBG_0, "msgin_phase0: (0x%p) "
- "<%02i-%i> ABORT msg\n",
- srb->cmd, dcb->target_id,
- dcb->target_lun);
dcb->flag |= ABORT_DEV_;
enable_msgout_abort(acb, srb);
break;
@@ -2792,7 +2373,6 @@ static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
default:
/* reject unknown messages */
if (srb->msgin_buf[0] & IDENTIFY_BASE) {
- dprintkdbg(DBG_0, "msgin_phase0: Identify msg\n");
srb->msg_count = 1;
srb->msgout_buf[0] = dcb->identify_msg;
DC395x_ENABLE_MSGOUT;
@@ -2815,7 +2395,6 @@ static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
u16 *pscsi_status)
{
- dprintkdbg(DBG_0, "msgin_phase1: (0x%p)\n", srb->cmd);
clear_fifo(acb, "msgin_phase1");
DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1);
if (!(srb->state & SRB_MSGIN)) {
@@ -2869,7 +2448,6 @@ static void disconnect(struct AdapterCtlBlk *acb)
struct ScsiReqBlk *srb;
if (!dcb) {
- dprintkl(KERN_ERR, "disconnect: No such device\n");
udelay(500);
/* Suspend queue for a while */
acb->last_reset =
@@ -2881,21 +2459,16 @@ static void disconnect(struct AdapterCtlBlk *acb)
}
srb = dcb->active_srb;
acb->active_dcb = NULL;
- dprintkdbg(DBG_0, "disconnect: (0x%p)\n", srb->cmd);
srb->scsi_phase = PH_BUS_FREE; /* initial phase */
clear_fifo(acb, "disconnect");
DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT);
if (srb->state & SRB_UNEXPECT_RESEL) {
- dprintkl(KERN_ERR,
- "disconnect: Unexpected reselection <%02i-%i>\n",
- dcb->target_id, dcb->target_lun);
srb->state = 0;
waiting_process_next(acb);
} else if (srb->state & SRB_ABORT_SENT) {
dcb->flag &= ~ABORT_DEV_;
acb->last_reset = jiffies + HZ / 2 + 1;
- dprintkl(KERN_ERR, "disconnect: SRB_ABORT_SENT\n");
doing_srb_done(acb, DID_ABORT, srb->cmd, 1);
waiting_process_next(acb);
} else {
@@ -2910,16 +2483,10 @@ static void disconnect(struct AdapterCtlBlk *acb)
if (srb->state != SRB_START_
&& srb->state != SRB_MSGOUT) {
srb->state = SRB_READY;
- dprintkl(KERN_DEBUG,
- "disconnect: (0x%p) Unexpected\n",
- srb->cmd);
srb->target_status = SCSI_STAT_SEL_TIMEOUT;
goto disc1;
} else {
/* Normal selection timeout */
- dprintkdbg(DBG_KG, "disconnect: (0x%p) "
- "<%02i-%i> SelTO\n", srb->cmd,
- dcb->target_id, dcb->target_lun);
if (srb->retry_count++ > DC395x_MAX_RETRIES
|| acb->scan_devices) {
srb->target_status =
@@ -2928,9 +2495,6 @@ static void disconnect(struct AdapterCtlBlk *acb)
}
free_tag(dcb, srb);
list_move(&srb->list, &dcb->srb_waiting_list);
- dprintkdbg(DBG_KG,
- "disconnect: (0x%p) Retry\n",
- srb->cmd);
waiting_set_timer(acb, HZ / 20);
}
} else if (srb->state & SRB_DISCONNECT) {
@@ -2939,9 +2503,6 @@ static void disconnect(struct AdapterCtlBlk *acb)
* SRB_DISCONNECT (This is what we expect!)
*/
if (bval & 0x40) {
- dprintkdbg(DBG_0, "disconnect: SCSI bus stat "
- " 0x%02x: ACK set! Other controllers?\n",
- bval);
/* It could come from another initiator, therefore don't do much ! */
} else
waiting_process_next(acb);
@@ -2965,7 +2526,6 @@ static void reselect(struct AdapterCtlBlk *acb)
struct ScsiReqBlk *srb = NULL;
u16 rsel_tar_lun_id;
u8 id, lun;
- dprintkdbg(DBG_0, "reselect: acb=%p\n", acb);
clear_fifo(acb, "reselect");
/*DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT | DO_DATALATCH); */
@@ -2974,18 +2534,11 @@ static void reselect(struct AdapterCtlBlk *acb)
if (dcb) { /* Arbitration lost but Reselection win */
srb = dcb->active_srb;
if (!srb) {
- dprintkl(KERN_DEBUG, "reselect: Arb lost Resel won, "
- "but active_srb == NULL\n");
DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */
return;
}
/* Why the if ? */
if (!acb->scan_devices) {
- dprintkdbg(DBG_KG, "reselect: (0x%p) <%02i-%i> "
- "Arb lost but Resel win rsel=%i stat=0x%04x\n",
- srb->cmd, dcb->target_id,
- dcb->target_lun, rsel_tar_lun_id,
- DC395x_read16(acb, TRM_S1040_SCSI_STATUS));
/*srb->state |= SRB_DISCONNECT; */
srb->state = SRB_READY;
@@ -2997,25 +2550,15 @@ static void reselect(struct AdapterCtlBlk *acb)
}
}
/* Read Reselected Target Id and LUN */
- if (!(rsel_tar_lun_id & (IDENTIFY_BASE << 8)))
- dprintkl(KERN_DEBUG, "reselect: Expects identify msg. "
- "Got %i!\n", rsel_tar_lun_id);
id = rsel_tar_lun_id & 0xff;
lun = (rsel_tar_lun_id >> 8) & 7;
dcb = find_dcb(acb, id, lun);
if (!dcb) {
- dprintkl(KERN_ERR, "reselect: From non existent device "
- "<%02i-%i>\n", id, lun);
DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */
return;
}
acb->active_dcb = dcb;
- if (!(dcb->dev_mode & NTC_DO_DISCONNECT))
- dprintkl(KERN_DEBUG, "reselect: in spite of forbidden "
- "disconnection? <%02i-%i>\n",
- dcb->target_id, dcb->target_lun);
-
if (dcb->sync_mode & EN_TAG_QUEUEING) {
srb = acb->tmp_srb;
dcb->active_srb = srb;
@@ -3026,9 +2569,6 @@ static void reselect(struct AdapterCtlBlk *acb)
/*
* abort command
*/
- dprintkl(KERN_DEBUG,
- "reselect: w/o disconnected cmds <%02i-%i>\n",
- dcb->target_id, dcb->target_lun);
srb = acb->tmp_srb;
srb->state = SRB_UNEXPECT_RESEL;
dcb->active_srb = srb;
@@ -3045,7 +2585,6 @@ static void reselect(struct AdapterCtlBlk *acb)
srb->scsi_phase = PH_BUS_FREE; /* initial phase */
/* Program HA ID, target ID, period and offset */
- dprintkdbg(DBG_0, "reselect: select <%i>\n", dcb->target_id);
DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id); /* host ID */
DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id); /* target ID */
DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset); /* offset */
@@ -3111,12 +2650,8 @@ static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
if (scsi_sg_count(cmd) && dir != DMA_NONE) {
/* unmap DC395x SG list */
- dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n",
- srb->sg_bus_addr, SEGMENTX_LEN);
dma_unmap_single(&acb->dev->dev, srb->sg_bus_addr, SEGMENTX_LEN,
DMA_TO_DEVICE);
- dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n",
- scsi_sg_count(cmd), scsi_bufflen(cmd));
/* unmap the sg segments */
scsi_dma_unmap(cmd);
}
@@ -3130,8 +2665,6 @@ static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb,
if (!(srb->flag & AUTO_REQSENSE))
return;
/* Unmap sense buffer */
- dprintkdbg(DBG_SG, "pci_unmap_srb_sense: buffer=%08x\n",
- srb->segment_x[0].address);
dma_unmap_single(&acb->dev->dev, srb->segment_x[0].address,
srb->segment_x[0].length, DMA_FROM_DEVICE);
/* Restore SG stuff */
@@ -3155,16 +2688,10 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
enum dma_data_direction dir = cmd->sc_data_direction;
int ckc_only = 1;
- dprintkdbg(DBG_1, "srb_done: (0x%p) <%02i-%i>\n", srb->cmd,
- srb->cmd->device->id, (u8)srb->cmd->device->lun);
- dprintkdbg(DBG_SG, "srb_done: srb=%p sg=%i(%i/%i) buf=%p\n",
- srb, scsi_sg_count(cmd), srb->sg_index, srb->sg_count,
- scsi_sgtalbe(cmd));
status = srb->target_status;
set_host_byte(cmd, DID_OK);
set_status_byte(cmd, SAM_STAT_GOOD);
if (srb->flag & AUTO_REQSENSE) {
- dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE1\n");
pci_unmap_srb_sense(acb, srb);
/*
** target status..........................
@@ -3172,57 +2699,11 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
srb->flag &= ~AUTO_REQSENSE;
srb->adapter_status = 0;
srb->target_status = SAM_STAT_CHECK_CONDITION;
- if (debug_enabled(DBG_1)) {
- switch (cmd->sense_buffer[2] & 0x0f) {
- case NOT_READY:
- dprintkl(KERN_DEBUG,
- "ReqSense: NOT_READY cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
- cmd->cmnd[0], dcb->target_id,
- dcb->target_lun, status, acb->scan_devices);
- break;
- case UNIT_ATTENTION:
- dprintkl(KERN_DEBUG,
- "ReqSense: UNIT_ATTENTION cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
- cmd->cmnd[0], dcb->target_id,
- dcb->target_lun, status, acb->scan_devices);
- break;
- case ILLEGAL_REQUEST:
- dprintkl(KERN_DEBUG,
- "ReqSense: ILLEGAL_REQUEST cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
- cmd->cmnd[0], dcb->target_id,
- dcb->target_lun, status, acb->scan_devices);
- break;
- case MEDIUM_ERROR:
- dprintkl(KERN_DEBUG,
- "ReqSense: MEDIUM_ERROR cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
- cmd->cmnd[0], dcb->target_id,
- dcb->target_lun, status, acb->scan_devices);
- break;
- case HARDWARE_ERROR:
- dprintkl(KERN_DEBUG,
- "ReqSense: HARDWARE_ERROR cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
- cmd->cmnd[0], dcb->target_id,
- dcb->target_lun, status, acb->scan_devices);
- break;
- }
- if (cmd->sense_buffer[7] >= 6)
- printk("sense=0x%02x ASC=0x%02x ASCQ=0x%02x "
- "(0x%08x 0x%08x)\n",
- cmd->sense_buffer[2], cmd->sense_buffer[12],
- cmd->sense_buffer[13],
- *((unsigned int *)(cmd->sense_buffer + 3)),
- *((unsigned int *)(cmd->sense_buffer + 8)));
- else
- printk("sense=0x%02x No ASC/ASCQ (0x%08x)\n",
- cmd->sense_buffer[2],
- *((unsigned int *)(cmd->sense_buffer + 3)));
- }
if (status == SAM_STAT_CHECK_CONDITION) {
set_host_byte(cmd, DID_BAD_TARGET);
goto ckc_e;
}
- dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE2\n");
set_status_byte(cmd, SAM_STAT_CHECK_CONDITION);
@@ -3239,8 +2720,6 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
return;
} else if (status == SAM_STAT_TASK_SET_FULL) {
tempcnt = (u8)list_size(&dcb->srb_going_list);
- dprintkl(KERN_INFO, "QUEUE_FULL for dev <%02i-%i> with %i cmnds\n",
- dcb->target_id, dcb->target_lun, tempcnt);
if (tempcnt > 1)
tempcnt--;
dcb->max_command = tempcnt;
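
When the target answers TASK SET FULL, the code above throttles the per-device command limit to just below the number of commands that were in flight. A minimal sketch of that policy, with a hypothetical helper name:

/* New per-device queue depth after a TASK SET FULL status: one below
 * the outstanding count, but never fewer than one command. */
static unsigned int throttle_after_queue_full(unsigned int outstanding)
{
	return outstanding > 1 ? outstanding - 1 : 1;
}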
@@ -3314,21 +2793,10 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
/* Here is the info for Doug Gilbert's sg3 ... */
scsi_set_resid(cmd, srb->total_xfer_length);
- if (debug_enabled(DBG_KG)) {
- if (srb->total_xfer_length)
- dprintkdbg(DBG_KG, "srb_done: (0x%p) <%02i-%i> "
- "cmnd=0x%02x Missed %i bytes\n",
- cmd, cmd->device->id, (u8)cmd->device->lun,
- cmd->cmnd[0], srb->total_xfer_length);
- }
if (srb != acb->tmp_srb) {
/* Add to free list */
- dprintkdbg(DBG_0, "srb_done: (0x%p) done result=0x%08x\n",
- cmd, cmd->result);
list_move_tail(&srb->list, &acb->srb_free_list);
- } else {
- dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n");
}
scsi_done(cmd);
@@ -3341,7 +2809,6 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
struct scsi_cmnd *cmd, u8 force)
{
struct DeviceCtlBlk *dcb;
- dprintkl(KERN_INFO, "doing_srb_done: pids ");
list_for_each_entry(dcb, &acb->dcb_list, list) {
struct ScsiReqBlk *srb;
@@ -3365,15 +2832,6 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
scsi_done(p);
}
}
- if (!list_empty(&dcb->srb_going_list))
- dprintkl(KERN_DEBUG,
- "How could the ML send cmnds to the Going queue? <%02i-%i>\n",
- dcb->target_id, dcb->target_lun);
- if (dcb->tag_mask)
- dprintkl(KERN_DEBUG,
- "tag_mask for <%02i-%i> should be empty, is %08x!\n",
- dcb->target_id, dcb->target_lun,
- dcb->tag_mask);
/* Waiting queue */
list_for_each_entry_safe(srb, tmp, &dcb->srb_waiting_list, list) {
@@ -3392,19 +2850,13 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
scsi_done(cmd);
}
}
- if (!list_empty(&dcb->srb_waiting_list))
- dprintkl(KERN_DEBUG, "ML queued %i cmnds again to <%02i-%i>\n",
- list_size(&dcb->srb_waiting_list), dcb->target_id,
- dcb->target_lun);
dcb->flag &= ~ABORT_DEV_;
}
- printk("\n");
}
static void reset_scsi_bus(struct AdapterCtlBlk *acb)
{
- dprintkdbg(DBG_0, "reset_scsi_bus: acb=%p\n", acb);
acb->acb_flag |= RESET_DEV; /* RESET_DETECT, RESET_DONE, RESET_DEV */
DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_RSTSCSI);
@@ -3451,7 +2903,6 @@ static void set_basic_config(struct AdapterCtlBlk *acb)
static void scsi_reset_detect(struct AdapterCtlBlk *acb)
{
- dprintkl(KERN_INFO, "scsi_reset_detect: acb=%p\n", acb);
/* delay half a second */
if (timer_pending(&acb->waiting_timer))
timer_delete(&acb->waiting_timer);
@@ -3488,8 +2939,6 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
struct ScsiReqBlk *srb)
{
struct scsi_cmnd *cmd = srb->cmd;
- dprintkdbg(DBG_1, "request_sense: (0x%p) <%02i-%i>\n",
- cmd, cmd->device->id, (u8)cmd->device->lun);
srb->flag |= AUTO_REQSENSE;
srb->adapter_status = 0;
@@ -3511,16 +2960,10 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
srb->segment_x[0].address = dma_map_single(&acb->dev->dev,
cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
DMA_FROM_DEVICE);
- dprintkdbg(DBG_SG, "request_sense: map buffer %p->%08x(%05x)\n",
- cmd->sense_buffer, srb->segment_x[0].address,
- SCSI_SENSE_BUFFERSIZE);
srb->sg_count = 1;
srb->sg_index = 0;
if (start_scsi(acb, dcb, srb)) { /* Should only happen if somebody else grabs the bus */
- dprintkl(KERN_DEBUG,
- "request_sense: (0x%p) failed <%02i-%i>\n",
- srb->cmd, dcb->target_id, dcb->target_lun);
list_move(&srb->list, &dcb->srb_waiting_list);
waiting_set_timer(acb, HZ / 100);
}
@@ -3548,7 +2991,6 @@ static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb,
struct DeviceCtlBlk *dcb;
dcb = kmalloc(sizeof(struct DeviceCtlBlk), GFP_ATOMIC);
- dprintkdbg(DBG_0, "device_alloc: <%02i-%i>\n", target, lun);
if (!dcb)
return NULL;
dcb->acb = NULL;
@@ -3598,10 +3040,6 @@ static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb,
return NULL;
}
- dprintkdbg(DBG_1,
- "device_alloc: <%02i-%i> copy from <%02i-%i>\n",
- dcb->target_id, dcb->target_lun,
- p->target_id, p->target_lun);
dcb->sync_mode = p->sync_mode;
dcb->sync_period = p->sync_period;
dcb->min_nego_period = p->min_nego_period;
@@ -3651,8 +3089,6 @@ static void adapter_remove_device(struct AdapterCtlBlk *acb,
{
struct DeviceCtlBlk *i;
struct DeviceCtlBlk *tmp;
- dprintkdbg(DBG_0, "adapter_remove_device: <%02i-%i>\n",
- dcb->target_id, dcb->target_lun);
/* fix up any pointers to this device that we have in the adapter */
if (acb->active_dcb == dcb)
@@ -3685,10 +3121,6 @@ static void adapter_remove_and_free_device(struct AdapterCtlBlk *acb,
struct DeviceCtlBlk *dcb)
{
if (list_size(&dcb->srb_going_list) > 1) {
- dprintkdbg(DBG_1, "adapter_remove_and_free_device: <%02i-%i> "
- "Won't remove because of %i active requests.\n",
- dcb->target_id, dcb->target_lun,
- list_size(&dcb->srb_going_list));
return;
}
adapter_remove_device(acb, dcb);
@@ -3706,8 +3138,6 @@ static void adapter_remove_and_free_all_devices(struct AdapterCtlBlk* acb)
{
struct DeviceCtlBlk *dcb;
struct DeviceCtlBlk *tmp;
- dprintkdbg(DBG_1, "adapter_remove_and_free_all_devices: num=%i\n",
- list_size(&acb->dcb_list));
list_for_each_entry_safe(dcb, tmp, &acb->dcb_list, list)
adapter_remove_and_free_device(acb, dcb);
@@ -4002,8 +3432,6 @@ static void check_eeprom(struct NvRamType *eeprom, unsigned long io_port)
* Checksum is wrong.
* Load a set of defaults into the eeprom buffer
*/
- dprintkl(KERN_WARNING,
- "EEProm checksum error: using default values and options.\n");
eeprom->sub_vendor_id[0] = (u8)PCI_VENDOR_ID_TEKRAM;
eeprom->sub_vendor_id[1] = (u8)(PCI_VENDOR_ID_TEKRAM >> 8);
eeprom->sub_sys_id[0] = (u8)PCI_DEVICE_ID_TEKRAM_TRMS1040;
@@ -4055,15 +3483,6 @@ static void check_eeprom(struct NvRamType *eeprom, unsigned long io_port)
**/
static void print_eeprom_settings(struct NvRamType *eeprom)
{
- dprintkl(KERN_INFO, "Used settings: AdapterID=%02i, Speed=%i(%02i.%01iMHz), dev_mode=0x%02x\n",
- eeprom->scsi_id,
- eeprom->target[0].period,
- clock_speed[eeprom->target[0].period] / 10,
- clock_speed[eeprom->target[0].period] % 10,
- eeprom->target[0].cfg0);
- dprintkl(KERN_INFO, " AdaptMode=0x%02x, Tags=%i(%02i), DelayReset=%is\n",
- eeprom->channel_cfg, eeprom->max_tag,
- 1 << eeprom->max_tag, eeprom->delay_time);
}
@@ -4094,15 +3513,12 @@ static int adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
for (i = 0; i < DC395x_MAX_SRB_CNT; i++)
acb->srb_array[i].segment_x = NULL;
- dprintkdbg(DBG_1, "Allocate %i pages for SG tables\n", pages);
while (pages--) {
ptr = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!ptr) {
adapter_sg_tables_free(acb);
return 1;
}
- dprintkdbg(DBG_1, "Allocate %li bytes at %p for SG segments %i\n",
- PAGE_SIZE, ptr, srb_idx);
i = 0;
while (i < srbs_per_page && srb_idx < DC395x_MAX_SRB_CNT)
acb->srb_array[srb_idx++].segment_x =
@@ -4111,8 +3527,6 @@ static int adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
if (i < srbs_per_page)
acb->srb.segment_x =
ptr + (i * DC395x_MAX_SG_LISTENTRY);
- else
- dprintkl(KERN_DEBUG, "No space for tmsrb SG table reserved?!\n");
return 0;
}
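
adapter_sg_tables_alloc() carves several fixed-size SG tables out of each PAGE_SIZE allocation rather than allocating one buffer per SRB. A sketch of the page-count arithmetic, with hypothetical parameter names:

/* How many PAGE_SIZE allocations are needed when 'per_page' SG tables
 * (each 'entries' * 'entry_size' bytes) fit into one page. */
static unsigned int sg_table_pages(unsigned int nr_srbs,
				   unsigned int entries,
				   unsigned int entry_size,
				   unsigned int page_size)
{
	unsigned int per_page = page_size / (entries * entry_size);

	return (nr_srbs + per_page - 1) / per_page;	/* round up */
}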
@@ -4132,8 +3546,6 @@ static void adapter_print_config(struct AdapterCtlBlk *acb)
u8 bval;
bval = DC395x_read8(acb, TRM_S1040_GEN_STATUS);
- dprintkl(KERN_INFO, "%sConnectors: ",
- ((bval & WIDESCSI) ? "(Wide) " : ""));
if (!(bval & CON5068))
printk("ext%s ", !(bval & EXT68HIGH) ? "68" : "50");
if (!(bval & CON68))
@@ -4293,7 +3705,6 @@ static void adapter_init_chip(struct AdapterCtlBlk *acb)
acb->config |= HCC_SCSI_RESET;
if (acb->config & HCC_SCSI_RESET) {
- dprintkl(KERN_INFO, "Performing initial SCSI bus reset\n");
DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTSCSI);
/*while (!( DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS) & INT_SCSIRESET )); */
@@ -4327,7 +3738,6 @@ static int adapter_init(struct AdapterCtlBlk *acb, unsigned long io_port,
u32 io_port_len, unsigned int irq)
{
if (!request_region(io_port, io_port_len, DC395X_NAME)) {
- dprintkl(KERN_ERR, "Failed to reserve IO region 0x%lx\n", io_port);
goto failed;
}
/* store port base to indicate we have registered it */
@@ -4336,7 +3746,6 @@ static int adapter_init(struct AdapterCtlBlk *acb, unsigned long io_port,
if (request_irq(irq, dc395x_interrupt, IRQF_SHARED, DC395X_NAME, acb)) {
/* release the region we just claimed */
- dprintkl(KERN_INFO, "Failed to register IRQ\n");
goto failed;
}
/* store irq to indicate we have registered it */
@@ -4353,18 +3762,12 @@ static int adapter_init(struct AdapterCtlBlk *acb, unsigned long io_port,
adapter_print_config(acb);
if (adapter_sg_tables_alloc(acb)) {
- dprintkl(KERN_DEBUG, "Memory allocation for SG tables failed\n");
goto failed;
}
adapter_init_scsi_host(acb->scsi_host);
adapter_init_chip(acb);
set_basic_config(acb);
- dprintkdbg(DBG_0,
- "adapter_init: acb=%p, pdcb_map=%p psrb_array=%p "
- "size{acb=0x%04x dcb=0x%04x srb=0x%04x}\n",
- acb, acb->dcb_map, acb->srb_array, sizeof(struct AdapterCtlBlk),
- sizeof(struct DeviceCtlBlk), sizeof(struct ScsiReqBlk));
return 0;
failed:
@@ -4528,14 +3931,6 @@ static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host)
seq_putc(m, '\n');
}
- if (debug_enabled(DBG_1)) {
- seq_printf(m, "DCB list for ACB %p:\n", acb);
- list_for_each_entry(dcb, &acb->dcb_list, list) {
- seq_printf(m, "%p -> ", dcb);
- }
- seq_puts(m, "END\n");
- }
-
DC395x_UNLOCK_IO(acb->scsi_host, flags);
return 0;
}
@@ -4560,21 +3955,6 @@ static const struct scsi_host_template dc395x_driver_template = {
/**
- * banner_display - Display banner on first instance of driver
- * initialized.
- **/
-static void banner_display(void)
-{
- static int banner_done = 0;
- if (!banner_done)
- {
- dprintkl(KERN_INFO, "%s %s\n", DC395X_BANNER, DC395X_VERSION);
- banner_done = 1;
- }
-}
-
-
-/**
* dc395x_init_one - Initialise a single instance of the adapter.
*
* The PCI layer will call this once for each instance of the adapter
@@ -4595,33 +3975,25 @@ static int dc395x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
unsigned int io_port_len;
unsigned int irq;
- dprintkdbg(DBG_0, "Init one instance (%s)\n", pci_name(dev));
- banner_display();
-
if (pci_enable_device(dev))
- {
- dprintkl(KERN_INFO, "PCI Enable device failed.\n");
return -ENODEV;
- }
+
io_port_base = pci_resource_start(dev, 0) & PCI_BASE_ADDRESS_IO_MASK;
io_port_len = pci_resource_len(dev, 0);
irq = dev->irq;
- dprintkdbg(DBG_0, "IO_PORT=0x%04lx, IRQ=0x%x\n", io_port_base, dev->irq);
/* allocate scsi host information (includes out adapter) */
scsi_host = scsi_host_alloc(&dc395x_driver_template,
sizeof(struct AdapterCtlBlk));
- if (!scsi_host) {
- dprintkl(KERN_INFO, "scsi_host_alloc failed\n");
+ if (!scsi_host)
goto fail;
- }
+
acb = (struct AdapterCtlBlk*)scsi_host->hostdata;
acb->scsi_host = scsi_host;
acb->dev = dev;
/* initialise the adapter and everything we need */
if (adapter_init(acb, io_port_base, io_port_len, irq)) {
- dprintkl(KERN_INFO, "adapter init failed\n");
acb = NULL;
goto fail;
}
@@ -4629,10 +4001,9 @@ static int dc395x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
pci_set_master(dev);
/* get the scsi mid level to scan for new devices on the bus */
- if (scsi_add_host(scsi_host, &dev->dev)) {
- dprintkl(KERN_ERR, "scsi_add_host failed\n");
+ if (scsi_add_host(scsi_host, &dev->dev))
goto fail;
- }
+
pci_set_drvdata(dev, scsi_host);
scsi_scan_host(scsi_host);
@@ -4659,8 +4030,6 @@ static void dc395x_remove_one(struct pci_dev *dev)
struct Scsi_Host *scsi_host = pci_get_drvdata(dev);
struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)(scsi_host->hostdata);
- dprintkdbg(DBG_0, "dc395x_remove_one: acb=%p\n", acb);
-
scsi_remove_host(scsi_host);
adapter_uninit(acb);
pci_disable_device(dev);
diff --git a/drivers/scsi/elx/libefc_sli/sli4.c b/drivers/scsi/elx/libefc_sli/sli4.c
index 5e7fb110bc3f..d9a231fc0e0d 100644
--- a/drivers/scsi/elx/libefc_sli/sli4.c
+++ b/drivers/scsi/elx/libefc_sli/sli4.c
@@ -3804,7 +3804,7 @@ sli_cmd_common_write_object(struct sli4 *sli4, void *buf, u16 noc,
wr_obj->desired_write_len_dword = cpu_to_le32(dwflags);
wr_obj->write_offset = cpu_to_le32(offset);
- strncpy(wr_obj->object_name, obj_name, sizeof(wr_obj->object_name) - 1);
+ strscpy(wr_obj->object_name, obj_name);
wr_obj->host_buffer_descriptor_count = cpu_to_le32(1);
bde = (struct sli4_bde *)wr_obj->host_buffer_descriptor;
@@ -3833,7 +3833,7 @@ sli_cmd_common_delete_object(struct sli4 *sli4, void *buf, char *obj_name)
SLI4_SUBSYSTEM_COMMON, CMD_V0,
SLI4_RQST_PYLD_LEN(cmn_delete_object));
- strncpy(req->object_name, obj_name, sizeof(req->object_name) - 1);
+ strscpy(req->object_name, obj_name);
return 0;
}
@@ -3856,7 +3856,7 @@ sli_cmd_common_read_object(struct sli4 *sli4, void *buf, u32 desired_read_len,
cpu_to_le32(desired_read_len & SLI4_REQ_DESIRE_READLEN);
rd_obj->read_offset = cpu_to_le32(offset);
- strncpy(rd_obj->object_name, obj_name, sizeof(rd_obj->object_name) - 1);
+ strscpy(rd_obj->object_name, obj_name);
rd_obj->host_buffer_descriptor_count = cpu_to_le32(1);
bde = (struct sli4_bde *)rd_obj->host_buffer_descriptor;
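
The three hunks above replace strncpy() with strscpy(), which always NUL-terminates the destination and, in its two-argument form, infers the bound from a fixed-size array. A minimal sketch of the pattern, using a hypothetical structure and field size:

struct obj_req {
	char object_name[64];	/* size illustrative only */
};

static void set_object_name(struct obj_req *req, const char *name)
{
	/* old: strncpy(req->object_name, name, sizeof(req->object_name) - 1);
	 * could leave the field unterminated if 'name' filled it. */
	strscpy(req->object_name, name);	/* bound taken from the array */
}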
diff --git a/drivers/scsi/fnic/fip.c b/drivers/scsi/fnic/fip.c
index 6e7c0b00eb41..19395e2aee44 100644
--- a/drivers/scsi/fnic/fip.c
+++ b/drivers/scsi/fnic/fip.c
@@ -200,7 +200,7 @@ void fnic_fcoe_start_fcf_discovery(struct fnic *fnic)
return;
}
- memset(iport->selected_fcf.fcf_mac, 0, ETH_ALEN);
+ eth_zero_addr(iport->selected_fcf.fcf_mac);
pdisc_sol = (struct fip_discovery *) frame;
*pdisc_sol = (struct fip_discovery) {
@@ -588,12 +588,12 @@ void fnic_common_fip_cleanup(struct fnic *fnic)
if (!is_zero_ether_addr(iport->fpma))
vnic_dev_del_addr(fnic->vdev, iport->fpma);
- memset(iport->fpma, 0, ETH_ALEN);
+ eth_zero_addr(iport->fpma);
iport->fcid = 0;
iport->r_a_tov = 0;
iport->e_d_tov = 0;
- memset(fnic->iport.fcfmac, 0, ETH_ALEN);
- memset(iport->selected_fcf.fcf_mac, 0, ETH_ALEN);
+ eth_zero_addr(fnic->iport.fcfmac);
+ eth_zero_addr(iport->selected_fcf.fcf_mac);
iport->selected_fcf.fcf_priority = 0;
iport->selected_fcf.fka_adv_period = 0;
iport->selected_fcf.ka_disabled = 0;
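
This hunk swaps open-coded memset() calls for eth_zero_addr() from <linux/etherdevice.h>, which clears a 6-byte MAC address with the same effect but reads more clearly:

#include <linux/etherdevice.h>

static void clear_mac(u8 mac[ETH_ALEN])
{
	/* old style: memset(mac, 0, ETH_ALEN); */
	eth_zero_addr(mac);	/* same effect, self-documenting */
}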
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index e17f5d8226bf..1323ed8aa717 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -46,6 +46,13 @@
#define HISI_SAS_IOST_ITCT_CACHE_DW_SZ 10
#define HISI_SAS_FIFO_DATA_DW_SIZE 32
+#define HISI_SAS_REG_MEM_SIZE 4
+#define HISI_SAS_MAX_CDB_LEN 16
+#define HISI_SAS_BLK_QUEUE_DEPTH 64
+
+#define BYTE_TO_DW 4
+#define BYTE_TO_DDW 8
+
#define HISI_SAS_STATUS_BUF_SZ (sizeof(struct hisi_sas_status_buffer))
#define HISI_SAS_COMMAND_TABLE_SZ (sizeof(union hisi_sas_command_table))
@@ -92,6 +99,8 @@
#define HISI_SAS_WAIT_PHYUP_TIMEOUT (30 * HZ)
#define HISI_SAS_CLEAR_ITCT_TIMEOUT (20 * HZ)
+#define HISI_SAS_DELAY_FOR_PHY_DISABLE 100
+#define NAME_BUF_SIZE 256
struct hisi_hba;
@@ -167,6 +176,8 @@ struct hisi_sas_debugfs_fifo {
u32 rd_data[HISI_SAS_FIFO_DATA_DW_SIZE];
};
+#define FRAME_RCVD_BUF 32
+#define SAS_PHY_RESV_SIZE 2
struct hisi_sas_phy {
struct work_struct works[HISI_PHYES_NUM];
struct hisi_hba *hisi_hba;
@@ -178,10 +189,10 @@ struct hisi_sas_phy {
spinlock_t lock;
u64 port_id; /* from hw */
u64 frame_rcvd_size;
- u8 frame_rcvd[32];
+ u8 frame_rcvd[FRAME_RCVD_BUF];
u8 phy_attached;
u8 in_reset;
- u8 reserved[2];
+ u8 reserved[SAS_PHY_RESV_SIZE];
u32 phy_type;
u32 code_violation_err_count;
enum sas_linkrate minimum_linkrate;
@@ -348,7 +359,8 @@ struct hisi_sas_hw {
const struct scsi_host_template *sht;
};
-#define HISI_SAS_MAX_DEBUGFS_DUMP (50)
+#define HISI_SAS_MAX_DEBUGFS_DUMP 50
+#define HISI_SAS_DEFAULT_DEBUGFS_DUMP 1
struct hisi_sas_debugfs_cq {
struct hisi_sas_cq *cq;
@@ -448,12 +460,12 @@ struct hisi_hba {
dma_addr_t sata_breakpoint_dma;
struct hisi_sas_slot *slot_info;
unsigned long flags;
- const struct hisi_sas_hw *hw; /* Low level hw interface */
+ const struct hisi_sas_hw *hw; /* Low level hw interface */
unsigned long sata_dev_bitmap[BITS_TO_LONGS(HISI_SAS_MAX_DEVICES)];
struct work_struct rst_work;
u32 phy_state;
- u32 intr_coal_ticks; /* Time of interrupt coalesce in us */
- u32 intr_coal_count; /* Interrupt count to coalesce */
+ u32 intr_coal_ticks; /* Time of interrupt coalesce in us */
+ u32 intr_coal_count; /* Interrupt count to coalesce */
int cq_nvecs;
@@ -528,12 +540,13 @@ struct hisi_sas_cmd_hdr {
__le64 dif_prd_table_addr;
};
+#define ITCT_RESV_DDW 12
struct hisi_sas_itct {
__le64 qw0;
__le64 sas_addr;
__le64 qw2;
__le64 qw3;
- __le64 qw4_15[12];
+ __le64 qw4_15[ITCT_RESV_DDW];
};
struct hisi_sas_iost {
@@ -543,22 +556,26 @@ struct hisi_sas_iost {
__le64 qw3;
};
+#define ERROR_RECORD_BUF_DW 4
struct hisi_sas_err_record {
- u32 data[4];
+ u32 data[ERROR_RECORD_BUF_DW];
};
+#define FIS_RESV_DW 3
struct hisi_sas_initial_fis {
struct hisi_sas_err_record err_record;
struct dev_to_host_fis fis;
- u32 rsvd[3];
+ u32 rsvd[FIS_RESV_DW];
};
+#define BREAKPOINT_DATA_SIZE 128
struct hisi_sas_breakpoint {
- u8 data[128];
+ u8 data[BREAKPOINT_DATA_SIZE];
};
+#define BREAKPOINT_TAG_NUM 32
struct hisi_sas_sata_breakpoint {
- struct hisi_sas_breakpoint tag[32];
+ struct hisi_sas_breakpoint tag[BREAKPOINT_TAG_NUM];
};
struct hisi_sas_sge {
@@ -569,13 +586,15 @@ struct hisi_sas_sge {
__le32 data_off;
};
+#define SMP_CMD_TABLE_SIZE 44
struct hisi_sas_command_table_smp {
- u8 bytes[44];
+ u8 bytes[SMP_CMD_TABLE_SIZE];
};
+#define DUMMY_BUF_SIZE 12
struct hisi_sas_command_table_stp {
struct host_to_dev_fis command_fis;
- u8 dummy[12];
+ u8 dummy[DUMMY_BUF_SIZE];
u8 atapi_cdb[ATAPI_CDB_LEN];
};
@@ -589,12 +608,13 @@ struct hisi_sas_sge_dif_page {
struct hisi_sas_sge sge[HISI_SAS_SGE_DIF_PAGE_CNT];
} __aligned(16);
+#define PROT_BUF_SIZE 7
struct hisi_sas_command_table_ssp {
struct ssp_frame_hdr hdr;
union {
struct {
struct ssp_command_iu task;
- u32 prot[7];
+ u32 prot[PROT_BUF_SIZE];
};
struct ssp_tmf_iu ssp_task;
struct xfer_rdy_iu xfer_rdy;
@@ -608,9 +628,10 @@ union hisi_sas_command_table {
struct hisi_sas_command_table_stp stp;
} __aligned(16);
+#define IU_BUF_SIZE 1024
struct hisi_sas_status_buffer {
struct hisi_sas_err_record err;
- u8 iu[1024];
+ u8 iu[IU_BUF_SIZE];
} __aligned(16);
struct hisi_sas_slot_buf_table {
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 944cf2fb0561..4864e957be0b 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -7,6 +7,16 @@
#include "hisi_sas.h"
#define DRV_NAME "hisi_sas"
+#define LINK_RATE_BIT_MASK 2
+#define FIS_BUF_SIZE 20
+#define WAIT_CMD_COMPLETE_DELAY 100
+#define WAIT_CMD_COMPLETE_TMROUT 5000
+#define DELAY_FOR_LINK_READY 2000
+#define BLK_CNT_OPTIMIZE_MARK 64
+#define HZ_TO_MHZ 1000000
+#define DELAY_FOR_SOFTRESET_MAX 1000
+#define DELAY_FOR_SOFTRESET_MIN 900
+
#define DEV_IS_GONE(dev) \
((!dev) || (dev->dev_type == SAS_PHY_UNUSED))
@@ -114,12 +124,10 @@ u8 hisi_sas_get_ata_protocol(struct sas_task *task)
}
default:
- {
if (direction == DMA_NONE)
return HISI_SAS_SATA_PROTOCOL_NONDATA;
return hisi_sas_get_ata_protocol_from_tf(qc);
}
- }
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
@@ -131,7 +139,7 @@ void hisi_sas_sata_done(struct sas_task *task,
struct hisi_sas_status_buffer *status_buf =
hisi_sas_status_buf_addr_mem(slot);
u8 *iu = &status_buf->iu[0];
- struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;
+ struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;
resp->frame_len = sizeof(struct dev_to_host_fis);
memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));
@@ -151,7 +159,7 @@ u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
max -= SAS_LINK_RATE_1_5_GBPS;
for (i = 0; i <= max; i++)
- rate |= 1 << (i * 2);
+ rate |= 1 << (i * LINK_RATE_BIT_MASK);
return rate;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask);
@@ -900,7 +908,7 @@ int hisi_sas_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
if (ret)
return ret;
if (!dev_is_sata(dev))
- sas_change_queue_depth(sdev, 64);
+ sas_change_queue_depth(sdev, HISI_SAS_BLK_QUEUE_DEPTH);
return 0;
}
@@ -1262,7 +1270,7 @@ static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
sas_phy->phy->minimum_linkrate = min;
hisi_sas_phy_enable(hisi_hba, phy_no, 0);
- msleep(100);
+ msleep(HISI_SAS_DELAY_FOR_PHY_DISABLE);
hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
hisi_sas_phy_enable(hisi_hba, phy_no, 1);
@@ -1292,7 +1300,7 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
case PHY_FUNC_LINK_RESET:
hisi_sas_phy_enable(hisi_hba, phy_no, 0);
- msleep(100);
+ msleep(HISI_SAS_DELAY_FOR_PHY_DISABLE);
hisi_sas_phy_enable(hisi_hba, phy_no, 1);
break;
@@ -1347,7 +1355,7 @@ static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
- u8 fis[20] = {0};
+ u8 fis[FIS_BUF_SIZE] = {0};
struct ata_port *ap = device->sata_dev.ap;
struct ata_link *link;
int rc = TMF_RESP_FUNC_FAILED;
@@ -1364,7 +1372,7 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device)
}
if (rc == TMF_RESP_FUNC_COMPLETE) {
- usleep_range(900, 1000);
+ usleep_range(DELAY_FOR_SOFTRESET_MIN, DELAY_FOR_SOFTRESET_MAX);
ata_for_each_link(link, ap, EDGE) {
int pmp = sata_srst_pmp(link);
@@ -1494,7 +1502,7 @@ static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba,
struct device *dev = hisi_hba->dev;
int rc = TMF_RESP_FUNC_FAILED;
struct ata_link *link;
- u8 fis[20] = {0};
+ u8 fis[FIS_BUF_SIZE] = {0};
int i;
for (i = 0; i < hisi_hba->n_phy; i++) {
@@ -1561,7 +1569,9 @@ void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba);
scsi_block_requests(shost);
- hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);
+ hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba,
+ WAIT_CMD_COMPLETE_DELAY,
+ WAIT_CMD_COMPLETE_TMROUT);
/*
* hisi_hba->timer is only used for v1/v2 hw, and check hw->sht
@@ -1862,7 +1872,7 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
rc = ata_wait_after_reset(link, jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT,
smp_ata_check_ready_type);
} else {
- msleep(2000);
+ msleep(DELAY_FOR_LINK_READY);
}
return rc;
@@ -1885,33 +1895,14 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
}
hisi_sas_dereg_device(hisi_hba, device);
- rc = hisi_sas_debug_I_T_nexus_reset(device);
- if (rc == TMF_RESP_FUNC_COMPLETE && dev_is_sata(device)) {
- struct sas_phy *local_phy;
-
+ if (dev_is_sata(device)) {
rc = hisi_sas_softreset_ata_disk(device);
- switch (rc) {
- case -ECOMM:
- rc = -ENODEV;
- break;
- case TMF_RESP_FUNC_FAILED:
- case -EMSGSIZE:
- case -EIO:
- local_phy = sas_get_local_phy(device);
- rc = sas_phy_enable(local_phy, 0);
- if (!rc) {
- local_phy->enabled = 0;
- dev_err(dev, "Disabled local phy of ATA disk %016llx due to softreset fail (%d)\n",
- SAS_ADDR(device->sas_addr), rc);
- rc = -ENODEV;
- }
- sas_put_local_phy(local_phy);
- break;
- default:
- break;
- }
+ if (rc == TMF_RESP_FUNC_FAILED)
+ dev_err(dev, "ata disk %016llx reset (%d)\n",
+ SAS_ADDR(device->sas_addr), rc);
}
+ rc = hisi_sas_debug_I_T_nexus_reset(device);
if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
hisi_sas_release_task(hisi_hba, device);
@@ -1934,12 +1925,9 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
hisi_sas_dereg_device(hisi_hba, device);
if (dev_is_sata(device)) {
- struct sas_phy *phy;
-
- phy = sas_get_local_phy(device);
+ struct sas_phy *phy = sas_get_local_phy(device);
rc = sas_phy_reset(phy, true);
-
if (rc == 0)
hisi_sas_release_task(hisi_hba, device);
sas_put_local_phy(phy);
@@ -2123,7 +2111,7 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy,
hisi_sas_bytes_dmaed(hisi_hba, phy_no, gfp_flags);
hisi_sas_port_notify_formed(sas_phy);
} else {
- struct hisi_sas_port *port = phy->port;
+ struct hisi_sas_port *port = phy->port;
if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) ||
phy->in_reset) {
@@ -2296,12 +2284,14 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba)
goto err_out;
/* roundup to avoid overly large block size */
- max_command_entries_ru = roundup(max_command_entries, 64);
+ max_command_entries_ru = roundup(max_command_entries,
+ BLK_CNT_OPTIMIZE_MARK);
if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK)
sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table);
else
sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table);
- sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64);
+
+ sz_slot_buf_ru = roundup(sz_slot_buf_ru, BLK_CNT_OPTIMIZE_MARK);
s = max(lcm(max_command_entries_ru, sz_slot_buf_ru), PAGE_SIZE);
blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s;
slots_per_blk = s / sz_slot_buf_ru;
@@ -2466,7 +2456,8 @@ int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
if (IS_ERR(refclk))
dev_dbg(dev, "no ref clk property\n");
else
- hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;
+ hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) /
+ HZ_TO_MHZ;
if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
dev_err(dev, "could not get property phy-count\n");
@@ -2588,7 +2579,7 @@ int hisi_sas_probe(struct platform_device *pdev,
shost->max_id = HISI_SAS_MAX_DEVICES;
shost->max_lun = ~0;
shost->max_channel = 1;
- shost->max_cmd_len = 16;
+ shost->max_cmd_len = HISI_SAS_MAX_CDB_LEN;
if (hisi_hba->hw->slot_index_alloc) {
shost->can_queue = HISI_SAS_MAX_COMMANDS;
shost->cmd_per_lun = HISI_SAS_MAX_COMMANDS;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 6621d633b2cc..6d97339371fb 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1759,7 +1759,7 @@ static const struct scsi_host_template sht_v1_hw = {
.sg_tablesize = HISI_SAS_SGE_PAGE_CNT,
.sdev_init = hisi_sas_sdev_init,
.shost_groups = host_v1_hw_groups,
- .host_reset = hisi_sas_host_reset,
+ .host_reset = hisi_sas_host_reset,
};
static const struct hisi_sas_hw hisi_sas_v1_hw = {
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 1e9830940f84..2adfedb8484c 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -2771,7 +2771,7 @@ static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p)
irq_msk = (hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO)
>> HGC_INVLD_DQE_INFO_FB_CH0_OFF) & 0x1ff;
while (irq_msk) {
- if (irq_msk & 1) {
+ if (irq_msk & 1) {
u32 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no,
CHL_INT0);
@@ -3111,7 +3111,7 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
return IRQ_HANDLED;
}
-static irqreturn_t cq_thread_v2_hw(int irq_no, void *p)
+static irqreturn_t cq_thread_v2_hw(int irq_no, void *p)
{
struct hisi_sas_cq *cq = p;
struct hisi_hba *hisi_hba = cq->hisi_hba;
@@ -3499,7 +3499,7 @@ static int write_gpio_v2_hw(struct hisi_hba *hisi_hba, u8 reg_type,
* numbered drive in the fourth byte.
* See SFF-8485 Rev. 0.7 Table 24.
*/
- void __iomem *reg_addr = hisi_hba->sgpio_regs +
+ void __iomem *reg_addr = hisi_hba->sgpio_regs +
reg_index * 4 + phy_no;
int data_idx = phy_no + 3 - (phy_no % 4) * 2;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 08dac9ae2f10..bc5d5356dd00 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -466,6 +466,12 @@
#define ITCT_HDR_RTOLT_OFF 48
#define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF)
+/*debugfs*/
+#define TWO_PARA_PER_LINE 2
+#define FOUR_PARA_PER_LINE 4
+#define DUMP_BUF_SIZE 8
+#define BIST_BUF_SIZE 16
+
struct hisi_sas_protect_iu_v3_hw {
u32 dw0;
u32 lbrtcv;
@@ -536,6 +542,43 @@ struct hisi_sas_err_record_v3 {
#define BASE_VECTORS_V3_HW 16
#define MIN_AFFINE_VECTORS_V3_HW (BASE_VECTORS_V3_HW + 1)
+#define IRQ_PHY_UP_DOWN_INDEX 1
+#define IRQ_CHL_INDEX 2
+#define IRQ_AXI_INDEX 11
+
+#define DELAY_FOR_RESET_HW 100
+#define HDR_SG_MOD 0x2
+#define LUN_SIZE 8
+#define ATTR_PRIO_REGION 9
+#define CDB_REGION 12
+#define PRIO_OFF 3
+#define TMF_REGION 10
+#define TAG_MSB 12
+#define TAG_LSB 13
+#define SMP_FRAME_TYPE 2
+#define SMP_CRC_SIZE 4
+#define HDR_TAG_OFF 3
+#define HOST_NO_OFF 6
+#define PHY_NO_OFF 7
+#define IDENTIFY_REG_READ 6
+#define LINK_RESET_TIMEOUT_OFF 4
+#define DECIMALISM_FLAG 10
+#define WAIT_RETRY 100
+#define WAIT_TMROUT 5000
+
+#define ID_DWORD0_INDEX 0
+#define ID_DWORD1_INDEX 1
+#define ID_DWORD2_INDEX 2
+#define ID_DWORD3_INDEX 3
+#define ID_DWORD4_INDEX 4
+#define ID_DWORD5_INDEX 5
+#define TICKS_BIT_INDEX 24
+#define COUNT_BIT_INDEX 8
+
+#define PORT_REG_LENGTH 0x100
+#define GLOBAL_REG_LENGTH 0x800
+#define AXI_REG_LENGTH 0x61
+#define RAS_REG_LENGTH 0x10
#define CHNL_INT_STS_MSK 0xeeeeeeee
#define CHNL_INT_STS_PHY_MSK 0xe
@@ -811,17 +854,17 @@ static void config_id_frame_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
identify_buffer = (u32 *)(&identify_frame);
hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0,
- __swab32(identify_buffer[0]));
+ __swab32(identify_buffer[ID_DWORD0_INDEX]));
hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1,
- __swab32(identify_buffer[1]));
+ __swab32(identify_buffer[ID_DWORD1_INDEX]));
hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2,
- __swab32(identify_buffer[2]));
+ __swab32(identify_buffer[ID_DWORD2_INDEX]));
hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3,
- __swab32(identify_buffer[3]));
+ __swab32(identify_buffer[ID_DWORD3_INDEX]));
hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4,
- __swab32(identify_buffer[4]));
+ __swab32(identify_buffer[ID_DWORD4_INDEX]));
hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5,
- __swab32(identify_buffer[5]));
+ __swab32(identify_buffer[ID_DWORD5_INDEX]));
}
static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
@@ -941,7 +984,7 @@ static int reset_hw_v3_hw(struct hisi_hba *hisi_hba)
/* Disable all of the PHYs */
hisi_sas_stop_phys(hisi_hba);
- udelay(50);
+ udelay(HISI_SAS_DELAY_FOR_PHY_DISABLE);
/* Ensure axi bus idle */
ret = hisi_sas_read32_poll_timeout(AXI_CFG, val, !val,
@@ -981,7 +1024,7 @@ static int hw_init_v3_hw(struct hisi_hba *hisi_hba)
return rc;
}
- msleep(100);
+ msleep(DELAY_FOR_RESET_HW);
init_reg_v3_hw(hisi_hba);
if (guid_parse("D5918B4B-37AE-4E10-A99F-E5E8A6EF4C1F", &guid)) {
@@ -1030,7 +1073,7 @@ static void disable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
cfg &= ~PHY_CFG_ENA_MSK;
hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
- mdelay(50);
+ mdelay(HISI_SAS_DELAY_FOR_PHY_DISABLE);
state = hisi_sas_read32(hisi_hba, PHY_STATE);
if (state & BIT(phy_no)) {
@@ -1066,7 +1109,7 @@ static void phy_hard_reset_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
txid_auto | TX_HARDRST_MSK);
}
- msleep(100);
+ msleep(HISI_SAS_DELAY_FOR_PHY_DISABLE);
hisi_sas_phy_enable(hisi_hba, phy_no, 1);
}
@@ -1111,7 +1154,8 @@ static int get_wideport_bitmap_v3_hw(struct hisi_hba *hisi_hba, int port_id)
for (i = 0; i < hisi_hba->n_phy; i++)
if (phy_state & BIT(i))
- if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id)
+ if (((phy_port_num_ma >> (i * HISI_SAS_REG_MEM_SIZE)) & 0xf) ==
+ port_id)
bitmap |= BIT(i);
return bitmap;
@@ -1308,10 +1352,10 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
/* map itct entry */
dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
- dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr)
- + 3) / 4) << CMD_HDR_CFL_OFF) |
- ((HISI_SAS_MAX_SSP_RESP_SZ / 4) << CMD_HDR_MRFL_OFF) |
- (2 << CMD_HDR_SG_MOD_OFF);
+ dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr) +
+ 3) / BYTE_TO_DW) << CMD_HDR_CFL_OFF) |
+ ((HISI_SAS_MAX_SSP_RESP_SZ / BYTE_TO_DW) << CMD_HDR_MRFL_OFF) |
+ (HDR_SG_MOD << CMD_HDR_SG_MOD_OFF);
hdr->dw2 = cpu_to_le32(dw2);
hdr->transfer_tags = cpu_to_le32(slot->idx);
@@ -1331,18 +1375,19 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot) +
sizeof(struct ssp_frame_hdr);
- memcpy(buf_cmd, &task->ssp_task.LUN, 8);
+ memcpy(buf_cmd, &task->ssp_task.LUN, LUN_SIZE);
if (!tmf) {
- buf_cmd[9] = ssp_task->task_attr;
- memcpy(buf_cmd + 12, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
+ buf_cmd[ATTR_PRIO_REGION] = ssp_task->task_attr;
+ memcpy(buf_cmd + CDB_REGION, scsi_cmnd->cmnd,
+ scsi_cmnd->cmd_len);
} else {
- buf_cmd[10] = tmf->tmf;
+ buf_cmd[TMF_REGION] = tmf->tmf;
switch (tmf->tmf) {
case TMF_ABORT_TASK:
case TMF_QUERY_TASK:
- buf_cmd[12] =
+ buf_cmd[TAG_MSB] =
(tmf->tag_of_task_to_be_managed >> 8) & 0xff;
- buf_cmd[13] =
+ buf_cmd[TAG_LSB] =
tmf->tag_of_task_to_be_managed & 0xff;
break;
default:
@@ -1375,7 +1420,8 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
unsigned int interval = scsi_prot_interval(scsi_cmnd);
unsigned int ilog2_interval = ilog2(interval);
- len = (task->total_xfer_len >> ilog2_interval) * 8;
+ len = (task->total_xfer_len >> ilog2_interval) *
+ BYTE_TO_DDW;
}
}
@@ -1395,6 +1441,7 @@ static void prep_smp_v3_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_device *sas_dev = device->lldd_dev;
dma_addr_t req_dma_addr;
unsigned int req_len;
+ u32 cfl;
/* req */
sg_req = &task->smp_task.smp_req;
@@ -1405,7 +1452,7 @@ static void prep_smp_v3_hw(struct hisi_hba *hisi_hba,
/* dw0 */
hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) |
(1 << CMD_HDR_PRIORITY_OFF) | /* high pri */
- (2 << CMD_HDR_CMD_OFF)); /* smp */
+ (SMP_FRAME_TYPE << CMD_HDR_CMD_OFF)); /* smp */
/* map itct entry */
hdr->dw1 = cpu_to_le32((sas_dev->device_id << CMD_HDR_DEV_ID_OFF) |
@@ -1413,8 +1460,9 @@ static void prep_smp_v3_hw(struct hisi_hba *hisi_hba,
(DIR_NO_DATA << CMD_HDR_DIR_OFF));
/* dw2 */
- hdr->dw2 = cpu_to_le32((((req_len - 4) / 4) << CMD_HDR_CFL_OFF) |
- (HISI_SAS_MAX_SMP_RESP_SZ / 4 <<
+ cfl = (req_len - SMP_CRC_SIZE) / BYTE_TO_DW;
+ hdr->dw2 = cpu_to_le32((cfl << CMD_HDR_CFL_OFF) |
+ (HISI_SAS_MAX_SMP_RESP_SZ / BYTE_TO_DW <<
CMD_HDR_MRFL_OFF));
hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF);
@@ -1479,12 +1527,13 @@ static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
struct ata_queued_cmd *qc = task->uldd_task;
hdr_tag = qc->tag;
- task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
+ task->ata_task.fis.sector_count |=
+ (u8)(hdr_tag << HDR_TAG_OFF);
dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF;
}
- dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / 4) << CMD_HDR_CFL_OFF |
- 2 << CMD_HDR_SG_MOD_OFF;
+ dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / BYTE_TO_DW) << CMD_HDR_CFL_OFF |
+ HDR_SG_MOD << CMD_HDR_SG_MOD_OFF;
hdr->dw2 = cpu_to_le32(dw2);
/* dw3 */
@@ -1544,9 +1593,9 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);
port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
- port_id = (port_id >> (4 * phy_no)) & 0xf;
+ port_id = (port_id >> (HISI_SAS_REG_MEM_SIZE * phy_no)) & 0xf;
link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
- link_rate = (link_rate >> (phy_no * 4)) & 0xf;
+ link_rate = (link_rate >> (phy_no * HISI_SAS_REG_MEM_SIZE)) & 0xf;
if (port_id == 0xf) {
dev_err(dev, "phyup: phy%d invalid portid\n", phy_no);
@@ -1579,8 +1628,8 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
sas_phy->oob_mode = SATA_OOB_MODE;
attached_sas_addr[0] = 0x50;
- attached_sas_addr[6] = shost->host_no;
- attached_sas_addr[7] = phy_no;
+ attached_sas_addr[HOST_NO_OFF] = shost->host_no;
+ attached_sas_addr[PHY_NO_OFF] = phy_no;
memcpy(sas_phy->attached_sas_addr,
attached_sas_addr,
SAS_ADDR_SIZE);
@@ -1596,7 +1645,7 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
(struct sas_identify_frame *)frame_rcvd;
dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate);
- for (i = 0; i < 6; i++) {
+ for (i = 0; i < IDENTIFY_REG_READ; i++) {
u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no,
RX_IDAF_DWORD0 + (i * 4));
frame_rcvd[i] = __swab32(idaf);
@@ -1701,7 +1750,7 @@ static irqreturn_t int_phy_up_down_bcast_v3_hw(int irq_no, void *p)
irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
& 0x11111111;
while (irq_msk) {
- if (irq_msk & 1) {
+ if (irq_msk & 1) {
u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no,
CHL_INT0);
u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
@@ -1866,7 +1915,7 @@ static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
dev_warn(dev, "phy%d stp link timeout (0x%x)\n",
phy_no, reg_value);
- if (reg_value & BIT(4))
+ if (reg_value & BIT(LINK_RESET_TIMEOUT_OFF))
hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
}
@@ -1924,8 +1973,7 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
u32 irq_msk;
int phy_no = 0;
- irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
- & CHNL_INT_STS_MSK;
+ irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS) & CHNL_INT_STS_MSK;
while (irq_msk) {
if (irq_msk & (CHNL_INT_STS_INT0_MSK << (phy_no * CHNL_WIDTH)))
@@ -2570,7 +2618,6 @@ static int interrupt_preinit_v3_hw(struct hisi_hba *hisi_hba)
if (vectors < 0)
return -ENOENT;
-
hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW - hisi_hba->iopoll_q_cnt;
shost->nr_hw_queues = hisi_hba->cq_nvecs + hisi_hba->iopoll_q_cnt;
@@ -2583,7 +2630,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
struct pci_dev *pdev = hisi_hba->pci_dev;
int rc, i;
- rc = devm_request_irq(dev, pci_irq_vector(pdev, 1),
+ rc = devm_request_irq(dev, pci_irq_vector(pdev, IRQ_PHY_UP_DOWN_INDEX),
int_phy_up_down_bcast_v3_hw, 0,
DRV_NAME " phy", hisi_hba);
if (rc) {
@@ -2591,7 +2638,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
return -ENOENT;
}
- rc = devm_request_irq(dev, pci_irq_vector(pdev, 2),
+ rc = devm_request_irq(dev, pci_irq_vector(pdev, IRQ_CHL_INDEX),
int_chnl_int_v3_hw, 0,
DRV_NAME " channel", hisi_hba);
if (rc) {
@@ -2599,7 +2646,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
return -ENOENT;
}
- rc = devm_request_irq(dev, pci_irq_vector(pdev, 11),
+ rc = devm_request_irq(dev, pci_irq_vector(pdev, IRQ_AXI_INDEX),
fatal_axi_int_v3_hw, 0,
DRV_NAME " fatal", hisi_hba);
if (rc) {
@@ -2612,7 +2659,8 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
for (i = 0; i < hisi_hba->cq_nvecs; i++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[i];
- int nr = hisi_sas_intr_conv ? 16 : 16 + i;
+ int nr = hisi_sas_intr_conv ? BASE_VECTORS_V3_HW :
+ BASE_VECTORS_V3_HW + i;
unsigned long irqflags = hisi_sas_intr_conv ? IRQF_SHARED :
IRQF_ONESHOT;
@@ -2670,14 +2718,14 @@ static void interrupt_disable_v3_hw(struct hisi_hba *hisi_hba)
struct pci_dev *pdev = hisi_hba->pci_dev;
int i;
- synchronize_irq(pci_irq_vector(pdev, 1));
- synchronize_irq(pci_irq_vector(pdev, 2));
- synchronize_irq(pci_irq_vector(pdev, 11));
+ synchronize_irq(pci_irq_vector(pdev, IRQ_PHY_UP_DOWN_INDEX));
+ synchronize_irq(pci_irq_vector(pdev, IRQ_CHL_INDEX));
+ synchronize_irq(pci_irq_vector(pdev, IRQ_AXI_INDEX));
for (i = 0; i < hisi_hba->queue_count; i++)
hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0x1);
for (i = 0; i < hisi_hba->cq_nvecs; i++)
- synchronize_irq(pci_irq_vector(pdev, i + 16));
+ synchronize_irq(pci_irq_vector(pdev, i + BASE_VECTORS_V3_HW));
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xffffffff);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xffffffff);
@@ -2709,7 +2757,7 @@ static int disable_host_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_stop_phys(hisi_hba);
- mdelay(10);
+ mdelay(HISI_SAS_DELAY_FOR_PHY_DISABLE);
reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE +
AM_CTRL_GLOBAL);
@@ -2846,13 +2894,13 @@ static ssize_t intr_coal_ticks_v3_hw_store(struct device *dev,
u32 intr_coal_ticks;
int ret;
- ret = kstrtou32(buf, 10, &intr_coal_ticks);
+ ret = kstrtou32(buf, DECIMALISM_FLAG, &intr_coal_ticks);
if (ret) {
dev_err(dev, "Input data of interrupt coalesce unmatch\n");
return -EINVAL;
}
- if (intr_coal_ticks >= BIT(24)) {
+ if (intr_coal_ticks >= BIT(TICKS_BIT_INDEX)) {
dev_err(dev, "intr_coal_ticks must be less than 2^24!\n");
return -EINVAL;
}
@@ -2885,13 +2933,13 @@ static ssize_t intr_coal_count_v3_hw_store(struct device *dev,
u32 intr_coal_count;
int ret;
- ret = kstrtou32(buf, 10, &intr_coal_count);
+ ret = kstrtou32(buf, DECIMALISM_FLAG, &intr_coal_count);
if (ret) {
dev_err(dev, "Input data of interrupt coalesce unmatch\n");
return -EINVAL;
}
- if (intr_coal_count >= BIT(8)) {
+ if (intr_coal_count >= BIT(COUNT_BIT_INDEX)) {
dev_err(dev, "intr_coal_count must be less than 2^8!\n");
return -EINVAL;
}
@@ -3023,7 +3071,7 @@ static const struct hisi_sas_debugfs_reg_lu debugfs_port_reg_lu[] = {
static const struct hisi_sas_debugfs_reg debugfs_port_reg = {
.lu = debugfs_port_reg_lu,
- .count = 0x100,
+ .count = PORT_REG_LENGTH,
.base_off = PORT_BASE,
};
@@ -3097,7 +3145,7 @@ static const struct hisi_sas_debugfs_reg_lu debugfs_global_reg_lu[] = {
static const struct hisi_sas_debugfs_reg debugfs_global_reg = {
.lu = debugfs_global_reg_lu,
- .count = 0x800,
+ .count = GLOBAL_REG_LENGTH,
};
static const struct hisi_sas_debugfs_reg_lu debugfs_axi_reg_lu[] = {
@@ -3110,7 +3158,7 @@ static const struct hisi_sas_debugfs_reg_lu debugfs_axi_reg_lu[] = {
static const struct hisi_sas_debugfs_reg debugfs_axi_reg = {
.lu = debugfs_axi_reg_lu,
- .count = 0x61,
+ .count = AXI_REG_LENGTH,
.base_off = AXI_MASTER_CFG_BASE,
};
@@ -3127,7 +3175,7 @@ static const struct hisi_sas_debugfs_reg_lu debugfs_ras_reg_lu[] = {
static const struct hisi_sas_debugfs_reg debugfs_ras_reg = {
.lu = debugfs_ras_reg_lu,
- .count = 0x10,
+ .count = RAS_REG_LENGTH,
.base_off = RAS_BASE,
};
@@ -3136,7 +3184,7 @@ static void debugfs_snapshot_prepare_v3_hw(struct hisi_hba *hisi_hba)
struct Scsi_Host *shost = hisi_hba->shost;
scsi_block_requests(shost);
- wait_cmds_complete_timeout_v3_hw(hisi_hba, 100, 5000);
+ wait_cmds_complete_timeout_v3_hw(hisi_hba, WAIT_RETRY, WAIT_TMROUT);
set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
hisi_sas_sync_cqs(hisi_hba);
@@ -3177,7 +3225,7 @@ static void read_iost_itct_cache_v3_hw(struct hisi_hba *hisi_hba,
return;
}
- memset(buf, 0, cache_dw_size * 4);
+ memset(buf, 0, cache_dw_size * BYTE_TO_DW);
buf[0] = val;
for (i = 1; i < cache_dw_size; i++)
@@ -3224,7 +3272,7 @@ static void hisi_sas_bist_test_restore_v3_hw(struct hisi_hba *hisi_hba)
reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE);
/* init OOB link rate as 1.5 Gbits */
reg_val &= ~CFG_PROG_OOB_PHY_LINK_RATE_MSK;
- reg_val |= (0x8 << CFG_PROG_OOB_PHY_LINK_RATE_OFF);
+ reg_val |= (SAS_LINK_RATE_1_5_GBPS << CFG_PROG_OOB_PHY_LINK_RATE_OFF);
hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE, reg_val);
/* enable PHY */
@@ -3233,6 +3281,9 @@ static void hisi_sas_bist_test_restore_v3_hw(struct hisi_hba *hisi_hba)
#define SAS_PHY_BIST_CODE_INIT 0x1
#define SAS_PHY_BIST_CODE1_INIT 0X80
+#define SAS_PHY_BIST_INIT_DELAY 100
+#define SAS_PHY_BIST_LOOP_TEST_0 1
+#define SAS_PHY_BIST_LOOP_TEST_1 2
static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
{
u32 reg_val, mode_tmp;
@@ -3251,12 +3302,13 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
ffe[FFE_SATA_1_5_GBPS], ffe[FFE_SATA_3_0_GBPS],
ffe[FFE_SATA_6_0_GBPS], fix_code[FIXED_CODE],
fix_code[FIXED_CODE_1]);
- mode_tmp = path_mode ? 2 : 1;
+ mode_tmp = path_mode ? SAS_PHY_BIST_LOOP_TEST_1 :
+ SAS_PHY_BIST_LOOP_TEST_0;
if (enable) {
/* some preparations before bist test */
hisi_sas_bist_test_prep_v3_hw(hisi_hba);
- /* set linkrate of bit test*/
+ /* set linkrate of bit test */
reg_val = hisi_sas_phy_read32(hisi_hba, phy_no,
PROG_PHY_LINK_RATE);
reg_val &= ~CFG_PROG_OOB_PHY_LINK_RATE_MSK;
@@ -3294,13 +3346,13 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
SAS_PHY_BIST_CODE1_INIT);
}
- mdelay(100);
+ mdelay(SAS_PHY_BIST_INIT_DELAY);
reg_val |= (CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK);
hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL,
reg_val);
/* clear error bit */
- mdelay(100);
+ mdelay(SAS_PHY_BIST_INIT_DELAY);
hisi_sas_phy_read32(hisi_hba, phy_no, SAS_BIST_ERR_CNT);
} else {
/* disable bist test and recover it */
@@ -3354,7 +3406,7 @@ static const struct scsi_host_template sht_v3_hw = {
.shost_groups = host_v3_hw_groups,
.sdev_groups = sdev_groups_v3_hw,
.tag_alloc_policy_rr = true,
- .host_reset = hisi_sas_host_reset,
+ .host_reset = hisi_sas_host_reset,
.host_tagset = 1,
.mq_poll = queue_complete_v3_hw,
};
@@ -3496,7 +3548,7 @@ static void debugfs_snapshot_port_reg_v3_hw(struct hisi_hba *hisi_hba)
for (phy_cnt = 0; phy_cnt < hisi_hba->n_phy; phy_cnt++) {
databuf = hisi_hba->debugfs_port_reg[dump_index][phy_cnt].data;
for (i = 0; i < port->count; i++, databuf++) {
- offset = port->base_off + 4 * i;
+ offset = port->base_off + HISI_SAS_REG_MEM_SIZE * i;
*databuf = hisi_sas_phy_read32(hisi_hba, phy_cnt,
offset);
}
@@ -3510,7 +3562,8 @@ static void debugfs_snapshot_global_reg_v3_hw(struct hisi_hba *hisi_hba)
int i;
for (i = 0; i < debugfs_global_reg.count; i++, databuf++)
- *databuf = hisi_sas_read32(hisi_hba, 4 * i);
+ *databuf = hisi_sas_read32(hisi_hba,
+ HISI_SAS_REG_MEM_SIZE * i);
}
static void debugfs_snapshot_axi_reg_v3_hw(struct hisi_hba *hisi_hba)
@@ -3521,7 +3574,9 @@ static void debugfs_snapshot_axi_reg_v3_hw(struct hisi_hba *hisi_hba)
int i;
for (i = 0; i < axi->count; i++, databuf++)
- *databuf = hisi_sas_read32(hisi_hba, 4 * i + axi->base_off);
+ *databuf = hisi_sas_read32(hisi_hba,
+ HISI_SAS_REG_MEM_SIZE * i +
+ axi->base_off);
}
static void debugfs_snapshot_ras_reg_v3_hw(struct hisi_hba *hisi_hba)
@@ -3532,7 +3587,9 @@ static void debugfs_snapshot_ras_reg_v3_hw(struct hisi_hba *hisi_hba)
int i;
for (i = 0; i < ras->count; i++, databuf++)
- *databuf = hisi_sas_read32(hisi_hba, 4 * i + ras->base_off);
+ *databuf = hisi_sas_read32(hisi_hba,
+ HISI_SAS_REG_MEM_SIZE * i +
+ ras->base_off);
}
static void debugfs_snapshot_itct_reg_v3_hw(struct hisi_hba *hisi_hba)
@@ -3595,12 +3652,11 @@ static void debugfs_print_reg_v3_hw(u32 *regs_val, struct seq_file *s,
int i;
for (i = 0; i < reg->count; i++) {
- int off = i * 4;
+ int off = i * HISI_SAS_REG_MEM_SIZE;
const char *name;
name = debugfs_to_reg_name_v3_hw(off, reg->base_off,
reg->lu);
-
if (name)
seq_printf(s, "0x%08x 0x%08x %s\n", off,
regs_val[i], name);
@@ -3673,9 +3729,9 @@ static void debugfs_show_row_64_v3_hw(struct seq_file *s, int index,
/* completion header size not fixed per HW version */
seq_printf(s, "index %04d:\n\t", index);
- for (i = 1; i <= sz / 8; i++, ptr++) {
+ for (i = 1; i <= sz / BYTE_TO_DDW; i++, ptr++) {
seq_printf(s, " 0x%016llx", le64_to_cpu(*ptr));
- if (!(i % 2))
+ if (!(i % TWO_PARA_PER_LINE))
seq_puts(s, "\n\t");
}
@@ -3689,9 +3745,9 @@ static void debugfs_show_row_32_v3_hw(struct seq_file *s, int index,
/* completion header size not fixed per HW version */
seq_printf(s, "index %04d:\n\t", index);
- for (i = 1; i <= sz / 4; i++, ptr++) {
+ for (i = 1; i <= sz / BYTE_TO_DW; i++, ptr++) {
seq_printf(s, " 0x%08x", le32_to_cpu(*ptr));
- if (!(i % 4))
+ if (!(i % FOUR_PARA_PER_LINE))
seq_puts(s, "\n\t");
}
seq_puts(s, "\n");
@@ -3776,7 +3832,7 @@ static int debugfs_iost_cache_v3_hw_show(struct seq_file *s, void *p)
struct hisi_sas_debugfs_iost_cache *debugfs_iost_cache = s->private;
struct hisi_sas_iost_itct_cache *iost_cache =
debugfs_iost_cache->cache;
- u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4;
+ u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * BYTE_TO_DW;
int i, tab_idx;
__le64 *iost;
@@ -3824,7 +3880,7 @@ static int debugfs_itct_cache_v3_hw_show(struct seq_file *s, void *p)
struct hisi_sas_debugfs_itct_cache *debugfs_itct_cache = s->private;
struct hisi_sas_iost_itct_cache *itct_cache =
debugfs_itct_cache->cache;
- u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4;
+ u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * BYTE_TO_DW;
int i, tab_idx;
__le64 *itct;
@@ -3853,12 +3909,12 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba, int index)
u64 *debugfs_timestamp;
struct dentry *dump_dentry;
struct dentry *dentry;
- char name[256];
+ char name[NAME_BUF_SIZE];
int p;
int c;
int d;
- snprintf(name, 256, "%d", index);
+ snprintf(name, NAME_BUF_SIZE, "%d", index);
dump_dentry = debugfs_create_dir(name, hisi_hba->debugfs_dump_dentry);
@@ -3874,7 +3930,7 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba, int index)
/* Create port dir and files */
dentry = debugfs_create_dir("port", dump_dentry);
for (p = 0; p < hisi_hba->n_phy; p++) {
- snprintf(name, 256, "%d", p);
+ snprintf(name, NAME_BUF_SIZE, "%d", p);
debugfs_create_file(name, 0400, dentry,
&hisi_hba->debugfs_port_reg[index][p],
@@ -3884,7 +3940,7 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba, int index)
/* Create CQ dir and files */
dentry = debugfs_create_dir("cq", dump_dentry);
for (c = 0; c < hisi_hba->queue_count; c++) {
- snprintf(name, 256, "%d", c);
+ snprintf(name, NAME_BUF_SIZE, "%d", c);
debugfs_create_file(name, 0400, dentry,
&hisi_hba->debugfs_cq[index][c],
@@ -3894,7 +3950,7 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba, int index)
/* Create DQ dir and files */
dentry = debugfs_create_dir("dq", dump_dentry);
for (d = 0; d < hisi_hba->queue_count; d++) {
- snprintf(name, 256, "%d", d);
+ snprintf(name, NAME_BUF_SIZE, "%d", d);
debugfs_create_file(name, 0400, dentry,
&hisi_hba->debugfs_dq[index][d],
@@ -3931,9 +3987,9 @@ static ssize_t debugfs_trigger_dump_v3_hw_write(struct file *file,
size_t count, loff_t *ppos)
{
struct hisi_hba *hisi_hba = file->f_inode->i_private;
- char buf[8];
+ char buf[DUMP_BUF_SIZE];
- if (count > 8)
+ if (count > DUMP_BUF_SIZE)
return -EFAULT;
if (copy_from_user(buf, user_buf, count))
@@ -3997,7 +4053,7 @@ static ssize_t debugfs_bist_linkrate_v3_hw_write(struct file *filp,
{
struct seq_file *m = filp->private_data;
struct hisi_hba *hisi_hba = m->private;
- char kbuf[16] = {}, *pkbuf;
+ char kbuf[BIST_BUF_SIZE] = {}, *pkbuf;
bool found = false;
int i;
@@ -4014,7 +4070,7 @@ static ssize_t debugfs_bist_linkrate_v3_hw_write(struct file *filp,
for (i = 0; i < ARRAY_SIZE(debugfs_loop_linkrate_v3_hw); i++) {
if (!strncmp(debugfs_loop_linkrate_v3_hw[i].name,
- pkbuf, 16)) {
+ pkbuf, BIST_BUF_SIZE)) {
hisi_hba->debugfs_bist_linkrate =
debugfs_loop_linkrate_v3_hw[i].value;
found = true;
@@ -4072,7 +4128,7 @@ static ssize_t debugfs_bist_code_mode_v3_hw_write(struct file *filp,
{
struct seq_file *m = filp->private_data;
struct hisi_hba *hisi_hba = m->private;
- char kbuf[16] = {}, *pkbuf;
+ char kbuf[BIST_BUF_SIZE] = {}, *pkbuf;
bool found = false;
int i;
@@ -4089,7 +4145,7 @@ static ssize_t debugfs_bist_code_mode_v3_hw_write(struct file *filp,
for (i = 0; i < ARRAY_SIZE(debugfs_loop_code_mode_v3_hw); i++) {
if (!strncmp(debugfs_loop_code_mode_v3_hw[i].name,
- pkbuf, 16)) {
+ pkbuf, BIST_BUF_SIZE)) {
hisi_hba->debugfs_bist_code_mode =
debugfs_loop_code_mode_v3_hw[i].value;
found = true;
@@ -4204,7 +4260,7 @@ static ssize_t debugfs_bist_mode_v3_hw_write(struct file *filp,
{
struct seq_file *m = filp->private_data;
struct hisi_hba *hisi_hba = m->private;
- char kbuf[16] = {}, *pkbuf;
+ char kbuf[BIST_BUF_SIZE] = {}, *pkbuf;
bool found = false;
int i;
@@ -4220,7 +4276,8 @@ static ssize_t debugfs_bist_mode_v3_hw_write(struct file *filp,
pkbuf = strstrip(kbuf);
for (i = 0; i < ARRAY_SIZE(debugfs_loop_modes_v3_hw); i++) {
- if (!strncmp(debugfs_loop_modes_v3_hw[i].name, pkbuf, 16)) {
+ if (!strncmp(debugfs_loop_modes_v3_hw[i].name, pkbuf,
+ BIST_BUF_SIZE)) {
hisi_hba->debugfs_bist_mode =
debugfs_loop_modes_v3_hw[i].value;
found = true;
@@ -4499,8 +4556,9 @@ static int debugfs_fifo_data_v3_hw_show(struct seq_file *s, void *p)
debugfs_read_fifo_data_v3_hw(phy);
- debugfs_show_row_32_v3_hw(s, 0, HISI_SAS_FIFO_DATA_DW_SIZE * 4,
- (__le32 *)phy->fifo.rd_data);
+ debugfs_show_row_32_v3_hw(s, 0,
+ HISI_SAS_FIFO_DATA_DW_SIZE * HISI_SAS_REG_MEM_SIZE,
+ (__le32 *)phy->fifo.rd_data);
return 0;
}
@@ -4632,14 +4690,14 @@ static int debugfs_alloc_v3_hw(struct hisi_hba *hisi_hba, int dump_index)
struct hisi_sas_debugfs_regs *regs =
&hisi_hba->debugfs_regs[dump_index][r];
- sz = debugfs_reg_array_v3_hw[r]->count * 4;
+ sz = debugfs_reg_array_v3_hw[r]->count * HISI_SAS_REG_MEM_SIZE;
regs->data = devm_kmalloc(dev, sz, GFP_KERNEL);
if (!regs->data)
goto fail;
regs->hisi_hba = hisi_hba;
}
- sz = debugfs_port_reg.count * 4;
+ sz = debugfs_port_reg.count * HISI_SAS_REG_MEM_SIZE;
for (p = 0; p < hisi_hba->n_phy; p++) {
struct hisi_sas_debugfs_port *port =
&hisi_hba->debugfs_port_reg[dump_index][p];
@@ -4749,11 +4807,11 @@ static void debugfs_phy_down_cnt_init_v3_hw(struct hisi_hba *hisi_hba)
{
struct dentry *dir = debugfs_create_dir("phy_down_cnt",
hisi_hba->debugfs_dir);
- char name[16];
+ char name[NAME_BUF_SIZE];
int phy_no;
for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
- snprintf(name, 16, "%d", phy_no);
+ snprintf(name, NAME_BUF_SIZE, "%d", phy_no);
debugfs_create_file(name, 0600, dir,
&hisi_hba->phy[phy_no],
&debugfs_phy_down_cnt_v3_hw_fops);
@@ -4938,7 +4996,7 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
shost->max_id = HISI_SAS_MAX_DEVICES;
shost->max_lun = ~0;
shost->max_channel = 1;
- shost->max_cmd_len = 16;
+ shost->max_cmd_len = HISI_SAS_MAX_CDB_LEN;
shost->can_queue = HISI_SAS_UNRESERVED_IPTT;
shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT;
if (hisi_hba->iopoll_q_cnt)
@@ -5016,12 +5074,13 @@ hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba)
{
int i;
- devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 1), hisi_hba);
- devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 2), hisi_hba);
- devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 11), hisi_hba);
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, IRQ_PHY_UP_DOWN_INDEX), hisi_hba);
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, IRQ_CHL_INDEX), hisi_hba);
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, IRQ_AXI_INDEX), hisi_hba);
for (i = 0; i < hisi_hba->cq_nvecs; i++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[i];
- int nr = hisi_sas_intr_conv ? 16 : 16 + i;
+ int nr = hisi_sas_intr_conv ? BASE_VECTORS_V3_HW :
+ BASE_VECTORS_V3_HW + i;
devm_free_irq(&pdev->dev, pci_irq_vector(pdev, nr), cq);
}
@@ -5051,9 +5110,11 @@ static void hisi_sas_reset_prepare_v3_hw(struct pci_dev *pdev)
{
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
struct hisi_hba *hisi_hba = sha->lldd_ha;
+ struct Scsi_Host *shost = hisi_hba->shost;
struct device *dev = hisi_hba->dev;
int rc;
+ wait_event(shost->host_wait, !scsi_host_in_recovery(shost));
dev_info(dev, "FLR prepare\n");
down(&hisi_hba->sem);
set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index 287e1ba8ddd7..82deb6a83a8c 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -392,36 +392,6 @@ enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
}
}
-enum sci_status sci_remote_device_reset(struct isci_remote_device *idev)
-{
- struct sci_base_state_machine *sm = &idev->sm;
- enum sci_remote_device_states state = sm->current_state_id;
-
- switch (state) {
- case SCI_DEV_INITIAL:
- case SCI_DEV_STOPPED:
- case SCI_DEV_STARTING:
- case SCI_SMP_DEV_IDLE:
- case SCI_SMP_DEV_CMD:
- case SCI_DEV_STOPPING:
- case SCI_DEV_FAILED:
- case SCI_DEV_RESETTING:
- case SCI_DEV_FINAL:
- default:
- dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
- __func__, dev_state_name(state));
- return SCI_FAILURE_INVALID_STATE;
- case SCI_DEV_READY:
- case SCI_STP_DEV_IDLE:
- case SCI_STP_DEV_CMD:
- case SCI_STP_DEV_NCQ:
- case SCI_STP_DEV_NCQ_ERROR:
- case SCI_STP_DEV_AWAIT_RESET:
- sci_change_state(sm, SCI_DEV_RESETTING);
- return SCI_SUCCESS;
- }
-}
-
enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
u32 frame_index)
{
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
index 561ae3f2cbbd..c1fdf45751cd 100644
--- a/drivers/scsi/isci/remote_device.h
+++ b/drivers/scsi/isci/remote_device.h
@@ -160,21 +160,6 @@ enum sci_status sci_remote_device_stop(
u32 timeout);
/**
- * sci_remote_device_reset() - This method will reset the device making it
- * ready for operation. This method must be called anytime the device is
- * reset either through a SMP phy control or a port hard reset request.
- * @remote_device: This parameter specifies the device to be reset.
- *
- * This method does not actually cause the device hardware to be reset. This
- * method resets the software object so that it will be operational after a
- * device hardware reset completes. An indication of whether the device reset
- * was accepted. SCI_SUCCESS This value is returned if the device reset is
- * started.
- */
-enum sci_status sci_remote_device_reset(
- struct isci_remote_device *idev);
-
-/**
* enum sci_remote_device_states - This enumeration depicts all the states
* for the common remote device state machine.
* @SCI_DEV_INITIAL: Simply the initial state for the base remote device
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 397216ff2c7e..54ee8ecec3b3 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -291,6 +291,138 @@ buffer_done:
return len;
}
+static ssize_t
+lpfc_vmid_info_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_vmid *vmp;
+ int len = 0, i, j, k, cpu;
+ char hxstr[LPFC_MAX_VMID_SIZE * 3] = {0};
+ struct timespec64 curr_tm;
+ struct lpfc_vmid_priority_range *vr;
+ u64 *lta, rct_acc = 0, max_lta = 0;
+ struct tm tm_val;
+
+ ktime_get_ts64(&curr_tm);
+
+ len += scnprintf(buf + len, PAGE_SIZE - len, "Key 'vmid':\n");
+
+ /* if enabled continue, else return */
+ if (lpfc_is_vmid_enabled(phba)) {
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "lpfc VMID Page: ON\n\n");
+ } else {
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "lpfc VMID Page: OFF\n\n");
+ return len;
+ }
+
+ /* if using priority tagging */
+ if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) {
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "VMID priority ranges:\n");
+ vr = vport->vmid_priority.vmid_range;
+ for (i = 0; i < vport->vmid_priority.num_descriptors; ++i) {
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "\t[x%x - x%x], qos: x%x\n",
+ vr->low, vr->high, vr->qos);
+ vr++;
+ }
+ }
+
+ for (i = 0; i < phba->cfg_max_vmid; i++) {
+ vmp = &vport->vmid[i];
+ max_lta = 0;
+
+ /* only if the slot is used */
+ if (!(vmp->flag & LPFC_VMID_SLOT_USED) ||
+ !(vmp->flag & LPFC_VMID_REGISTERED))
+ continue;
+
+ /* if using priority tagging */
+ if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) {
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "VEM ID: %02x:%02x:%02x:%02x:"
+ "%02x:%02x:%02x:%02x:%02x:%02x:"
+ "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ vport->lpfc_vmid_host_uuid[0],
+ vport->lpfc_vmid_host_uuid[1],
+ vport->lpfc_vmid_host_uuid[2],
+ vport->lpfc_vmid_host_uuid[3],
+ vport->lpfc_vmid_host_uuid[4],
+ vport->lpfc_vmid_host_uuid[5],
+ vport->lpfc_vmid_host_uuid[6],
+ vport->lpfc_vmid_host_uuid[7],
+ vport->lpfc_vmid_host_uuid[8],
+ vport->lpfc_vmid_host_uuid[9],
+ vport->lpfc_vmid_host_uuid[10],
+ vport->lpfc_vmid_host_uuid[11],
+ vport->lpfc_vmid_host_uuid[12],
+ vport->lpfc_vmid_host_uuid[13],
+ vport->lpfc_vmid_host_uuid[14],
+ vport->lpfc_vmid_host_uuid[15]);
+ }
+
+ /* IO stats */
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "ID00 READs:%llx WRITEs:%llx\n",
+ vmp->io_rd_cnt,
+ vmp->io_wr_cnt);
+ for (j = 0, k = 0; j < strlen(vmp->host_vmid); j++, k += 3)
+ sprintf((char *)(hxstr + k), "%2x ", vmp->host_vmid[j]);
+ /* UUIDs */
+ len += scnprintf(buf + len, PAGE_SIZE - len, "UUID:\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%s\n", hxstr);
+
+ len += scnprintf(buf + len, PAGE_SIZE - len, "String (%s)\n",
+ vmp->host_vmid);
+
+ if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "CS_CTL VMID: 0x%x\n",
+ vmp->un.cs_ctl_vmid);
+ else
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "Application id: 0x%x\n",
+ vmp->un.app_id);
+
+ /* calculate the last access time */
+ for_each_possible_cpu(cpu) {
+ lta = per_cpu_ptr(vmp->last_io_time, cpu);
+ if (!lta)
+ continue;
+
+ /* if last access time is less than timeout */
+ if (time_after((unsigned long)*lta, jiffies))
+ continue;
+
+ if (*lta > max_lta)
+ max_lta = *lta;
+ }
+
+ rct_acc = jiffies_to_msecs(jiffies - max_lta) / 1000;
+ /* current time */
+ time64_to_tm(ktime_get_real_seconds(),
+ -(sys_tz.tz_minuteswest * 60) - rct_acc, &tm_val);
+
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "Last Access Time :"
+ "%ld-%d-%dT%02d:%02d:%02d\n\n",
+ 1900 + tm_val.tm_year, tm_val.tm_mon + 1,
+ tm_val.tm_mday, tm_val.tm_hour,
+ tm_val.tm_min, tm_val.tm_sec);
+
+ if (len >= PAGE_SIZE)
+ return len;
+
+ memset(hxstr, 0, LPFC_MAX_VMID_SIZE * 3);
+ }
+ return len;
+}
+
/**
* lpfc_drvr_version_show - Return the Emulex driver string with version number
* @dev: class unused variable.
@@ -3011,6 +3143,7 @@ static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show,
NULL);
static DEVICE_ATTR(cmf_info, 0444, lpfc_cmf_info_show, NULL);
+static DEVICE_ATTR_RO(lpfc_vmid_info);
#define WWN_SZ 8
/**
@@ -6117,6 +6250,7 @@ static struct attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_vmid_inactivity_timeout.attr,
&dev_attr_lpfc_vmid_app_header.attr,
&dev_attr_lpfc_vmid_priority_tagging.attr,
+ &dev_attr_lpfc_vmid_info.attr,
NULL,
};
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index c8f8496bbdf8..d61d979f9b77 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -2687,8 +2687,7 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
evt->wait_time_stamp = jiffies;
time_left = wait_event_interruptible_timeout(
evt->wq, !list_empty(&evt->events_to_see),
- msecs_to_jiffies(1000 *
- ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
+ secs_to_jiffies(phba->fc_ratov * 2 + LPFC_DRVR_TIMEOUT));
if (list_empty(&evt->events_to_see))
ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
else {
@@ -3258,8 +3257,7 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job)
evt->waiting = 1;
time_left = wait_event_interruptible_timeout(
evt->wq, !list_empty(&evt->events_to_see),
- msecs_to_jiffies(1000 *
- ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
+ secs_to_jiffies(phba->fc_ratov * 2 + LPFC_DRVR_TIMEOUT));
evt->waiting = 0;
if (list_empty(&evt->events_to_see)) {
rc = (time_left) ? -EINTR : -ETIMEDOUT;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 179be6c5a43e..3d15a964f5c9 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -161,7 +161,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
struct lpfc_hba *phba;
struct lpfc_work_evt *evtp;
unsigned long iflags;
- bool nvme_reg = false;
+ bool drop_initial_node_ref = false;
ndlp = ((struct lpfc_rport_data *)rport->dd_data)->pnode;
if (!ndlp)
@@ -188,8 +188,13 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
spin_lock_irqsave(&ndlp->lock, iflags);
ndlp->rport = NULL;
- if (ndlp->fc4_xpt_flags & NVME_XPT_REGD)
- nvme_reg = true;
+ /* Only 1 thread can drop the initial node reference.
+ * If not registered for NVME and NLP_DROPPED flag is
+ * clear, remove the initial reference.
+ */
+ if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
+ if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag))
+ drop_initial_node_ref = true;
/* The scsi_transport is done with the rport so lpfc cannot
* call to unregister.
@@ -200,13 +205,16 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
/* If NLP_XPT_REGD was cleared in lpfc_nlp_unreg_node,
* unregister calls were made to the scsi and nvme
* transports and refcnt was already decremented. Clear
- * the NLP_XPT_REGD flag only if the NVME Rport is
+ * the NLP_XPT_REGD flag only if the NVME nrport is
* confirmed unregistered.
*/
- if (!nvme_reg && ndlp->fc4_xpt_flags & NLP_XPT_REGD) {
- ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
+ if (ndlp->fc4_xpt_flags & NLP_XPT_REGD) {
+ if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
+ ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
spin_unlock_irqrestore(&ndlp->lock, iflags);
- lpfc_nlp_put(ndlp); /* may free ndlp */
+
+ /* Release scsi transport reference */
+ lpfc_nlp_put(ndlp);
} else {
spin_unlock_irqrestore(&ndlp->lock, iflags);
}
@@ -214,14 +222,8 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
spin_unlock_irqrestore(&ndlp->lock, iflags);
}
- /* Only 1 thread can drop the initial node reference. If
- * another thread has set NLP_DROPPED, this thread is done.
- */
- if (nvme_reg || test_bit(NLP_DROPPED, &ndlp->nlp_flag))
- return;
-
- set_bit(NLP_DROPPED, &ndlp->nlp_flag);
- lpfc_nlp_put(ndlp);
+ if (drop_initial_node_ref)
+ lpfc_nlp_put(ndlp);
return;
}
@@ -4695,9 +4697,7 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) {
vport->phba->nport_event_cnt++;
if (vport->phba->nvmet_support == 0) {
- /* Start devloss if target. */
- if (ndlp->nlp_type & NLP_NVME_TARGET)
- lpfc_nvme_unregister_port(vport, ndlp);
+ lpfc_nvme_unregister_port(vport, ndlp);
} else {
/* NVMET has no upcall. */
lpfc_nlp_put(ndlp);
@@ -5053,7 +5053,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
case CMD_GEN_REQUEST64_CR:
if (iocb->ndlp == ndlp)
return 1;
- fallthrough;
+ break;
case CMD_ELS_REQUEST64_CR:
if (remote_id == ndlp->nlp_DID)
return 1;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 90021653e59e..2400602a8561 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1907,6 +1907,9 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
uint32_t intr_mode;
LPFC_MBOXQ_t *mboxq;
+ /* Notifying the transport that the targets are going offline. */
+ lpfc_scsi_dev_block(phba);
+
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
LPFC_SLI_INTF_IF_TYPE_2) {
/*
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index b1adb9f59097..a6647dd360d1 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -2508,7 +2508,10 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
"6031 RemotePort Registration failed "
"err: %d, DID x%06x ref %u\n",
ret, ndlp->nlp_DID, kref_read(&ndlp->kref));
- lpfc_nlp_put(ndlp);
+
+ /* Only release reference if one was taken for this request */
+ if (!oldrport)
+ lpfc_nlp_put(ndlp);
}
return ret;
@@ -2614,7 +2617,8 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* clear any rport state until the transport calls back.
*/
- if (ndlp->nlp_type & NLP_NVME_TARGET) {
+ if ((ndlp->nlp_type & NLP_NVME_TARGET) ||
+ (remoteport->port_role & FC_PORT_ROLE_NVME_TARGET)) {
/* No concern about the role change on the nvme remoteport.
* The transport will update it.
*/
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 6574f9e74476..2ebb073e4ef3 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -3926,12 +3926,19 @@ void lpfc_poll_eratt(struct timer_list *t)
uint64_t sli_intr, cnt;
phba = from_timer(phba, t, eratt_poll);
- if (!test_bit(HBA_SETUP, &phba->hba_flag))
- return;
if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
return;
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ !test_bit(HBA_SETUP, &phba->hba_flag)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "0663 HBA still initializing 0x%lx, restart "
+ "timer\n",
+ phba->hba_flag);
+ goto restart_timer;
+ }
+
/* Here we will also keep track of interrupts per sec of the hba */
sli_intr = phba->sli.slistat.sli_intr;
@@ -3950,13 +3957,16 @@ void lpfc_poll_eratt(struct timer_list *t)
/* Check chip HA register for error event */
eratt = lpfc_sli_check_eratt(phba);
- if (eratt)
+ if (eratt) {
/* Tell the worker thread there is work to do */
lpfc_worker_wake_up(phba);
- else
- /* Restart the timer for next eratt poll */
- mod_timer(&phba->eratt_poll,
- jiffies + secs_to_jiffies(phba->eratt_poll_interval));
+ return;
+ }
+
+restart_timer:
+ /* Restart the timer for next eratt poll */
+ mod_timer(&phba->eratt_poll,
+ jiffies + secs_to_jiffies(phba->eratt_poll_interval));
return;
}
@@ -6003,9 +6013,9 @@ lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr);
phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr);
- memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
- strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
+ memcpy(phba->BIOSVersion, cntl_attr->bios_ver_str,
sizeof(phba->BIOSVersion));
+ phba->BIOSVersion[sizeof(phba->BIOSVersion) - 1] = '\0';
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, "
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 638b50f35287..749688aa8a82 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.4.0.8"
+#define LPFC_DRIVER_VERSION "14.4.0.9"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index cc56a7334319..2797aa75a689 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -505,7 +505,7 @@ lpfc_send_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
wait_event_timeout(waitq,
!test_bit(NLP_WAIT_FOR_LOGO,
&ndlp->save_flags),
- msecs_to_jiffies(phba->fc_ratov * 2000));
+ secs_to_jiffies(phba->fc_ratov * 2));
if (!test_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags))
goto logo_cmpl;
@@ -703,7 +703,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
wait_event_timeout(waitq,
!test_bit(NLP_WAIT_FOR_DA_ID,
&ndlp->save_flags),
- msecs_to_jiffies(phba->fc_ratov * 2000));
+ secs_to_jiffies(phba->fc_ratov * 2));
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT | LOG_ELS,
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index c186b892150f..ce444efd859e 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -985,6 +985,10 @@ static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc,
goto out;
}
}
+ dprint_event_bh(mrioc,
+ "exposed target device with handle(0x%04x), perst_id(%d)\n",
+ tgtdev->dev_handle, perst_id);
+ goto out;
} else
mpi3mr_report_tgtdev_to_sas_transport(mrioc, tgtdev);
out:
@@ -1344,9 +1348,9 @@ static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc,
(struct mpi3_event_data_device_status_change *)fwevt->event_data;
dev_handle = le16_to_cpu(evtdata->dev_handle);
- ioc_info(mrioc,
- "%s :device status change: handle(0x%04x): reason code(0x%x)\n",
- __func__, dev_handle, evtdata->reason_code);
+ dprint_event_bh(mrioc,
+ "processing device status change event bottom half for handle(0x%04x), rc(0x%02x)\n",
+ dev_handle, evtdata->reason_code);
switch (evtdata->reason_code) {
case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
delete = 1;
@@ -1365,8 +1369,13 @@ static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc,
}
tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
- if (!tgtdev)
+ if (!tgtdev) {
+ dprint_event_bh(mrioc,
+ "processing device status change event bottom half,\n"
+ "cannot identify target device for handle(0x%04x), rc(0x%02x)\n",
+ dev_handle, evtdata->reason_code);
goto out;
+ }
if (uhide) {
tgtdev->is_hidden = 0;
if (!tgtdev->host_exposed)
@@ -1406,12 +1415,17 @@ static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc,
perst_id = le16_to_cpu(dev_pg0->persistent_id);
dev_handle = le16_to_cpu(dev_pg0->dev_handle);
- ioc_info(mrioc,
- "%s :Device info change: handle(0x%04x): persist_id(0x%x)\n",
- __func__, dev_handle, perst_id);
+ dprint_event_bh(mrioc,
+ "processing device info change event bottom half for handle(0x%04x), perst_id(%d)\n",
+ dev_handle, perst_id);
tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
- if (!tgtdev)
+ if (!tgtdev) {
+ dprint_event_bh(mrioc,
+ "cannot identify target device for device info\n"
+ "change event handle(0x%04x), perst_id(%d)\n",
+ dev_handle, perst_id);
goto out;
+ }
mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, false);
if (!tgtdev->is_hidden && !tgtdev->host_exposed)
mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
@@ -2012,8 +2026,11 @@ static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
mpi3mr_fwevt_del_from_list(mrioc, fwevt);
mrioc->current_event = fwevt;
- if (mrioc->stop_drv_processing)
+ if (mrioc->stop_drv_processing) {
+ dprint_event_bh(mrioc, "ignoring event(0x%02x) in the bottom half handler\n"
+ "due to stop_drv_processing\n", fwevt->event_id);
goto out;
+ }
if (mrioc->unrecoverable) {
dprint_event_bh(mrioc,
@@ -2025,6 +2042,9 @@ static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
if (!fwevt->process_evt)
goto evt_ack;
+ dprint_event_bh(mrioc, "processing event(0x%02x) in the bottom half handler\n",
+ fwevt->event_id);
+
switch (fwevt->event_id) {
case MPI3_EVENT_DEVICE_ADDED:
{
@@ -2763,6 +2783,9 @@ static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc,
goto out;
dev_handle = le16_to_cpu(evtdata->dev_handle);
+ dprint_event_th(mrioc,
+ "device status change event top half with rc(0x%02x) for handle(0x%04x)\n",
+ evtdata->reason_code, dev_handle);
switch (evtdata->reason_code) {
case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT:
@@ -2786,8 +2809,12 @@ static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc,
}
tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
- if (!tgtdev)
+ if (!tgtdev) {
+ dprint_event_th(mrioc,
+ "processing device status change event could not identify device for handle(0x%04x)\n",
+ dev_handle);
goto out;
+ }
if (hide)
tgtdev->is_hidden = hide;
if (tgtdev->starget && tgtdev->starget->hostdata) {
@@ -2863,13 +2890,13 @@ static void mpi3mr_energypackchg_evt_th(struct mpi3mr_ioc *mrioc,
u16 shutdown_timeout = le16_to_cpu(evtdata->shutdown_timeout);
if (shutdown_timeout <= 0) {
- ioc_warn(mrioc,
+ dprint_event_th(mrioc,
"%s :Invalid Shutdown Timeout received = %d\n",
__func__, shutdown_timeout);
return;
}
- ioc_info(mrioc,
+ dprint_event_th(mrioc,
"%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n",
__func__, mrioc->facts.shutdown_timeout, shutdown_timeout);
mrioc->facts.shutdown_timeout = shutdown_timeout;
@@ -2945,9 +2972,9 @@ void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc)
* @mrioc: Adapter instance reference
* @event_reply: event data
*
- * Identify whteher the event has to handled and acknowledged
- * and either process the event in the tophalf and/or schedule a
- * bottom half through mpi3mr_fwevt_worker.
+ * Identifies whether the event has to be handled and acknowledged,
+ * and either processes the event in the top-half and/or schedule a
+ * bottom-half through mpi3mr_fwevt_worker().
*
* Return: Nothing
*/
@@ -2974,9 +3001,11 @@ void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
struct mpi3_device_page0 *dev_pg0 =
(struct mpi3_device_page0 *)event_reply->event_data;
if (mpi3mr_create_tgtdev(mrioc, dev_pg0))
- ioc_err(mrioc,
- "%s :Failed to add device in the device add event\n",
- __func__);
+ dprint_event_th(mrioc,
+ "failed to process device added event for handle(0x%04x),\n"
+ "perst_id(%d) in the event top half handler\n",
+ le16_to_cpu(dev_pg0->dev_handle),
+ le16_to_cpu(dev_pg0->persistent_id));
else
process_evt_bh = 1;
break;
@@ -3039,11 +3068,15 @@ void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
break;
}
if (process_evt_bh || ack_req) {
+ dprint_event_th(mrioc,
+ "scheduling bottom half handler for event(0x%02x),ack_required=%d\n",
+ evt_type, ack_req);
sz = event_reply->event_data_length * 4;
fwevt = mpi3mr_alloc_fwevt(sz);
if (!fwevt) {
- ioc_info(mrioc, "%s :failure at %s:%d/%s()!\n",
- __func__, __FILE__, __LINE__, __func__);
+ dprint_event_th(mrioc,
+ "failed to schedule bottom half handler for\n"
+ "event(0x%02x), ack_required=%d\n", evt_type, ack_req);
return;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 063b10dd8251..02fc204b9bf7 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -2869,8 +2869,9 @@ _ctl_get_mpt_mctp_passthru_adapter(int dev_index)
if (ioc->facts.IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_MCTP_PASSTHRU) {
if (count == dev_index) {
spin_unlock(&gioc_lock);
- return 0;
+ return ioc;
}
+ count++;
}
}
spin_unlock(&gioc_lock);
diff --git a/drivers/scsi/mvsas/mv_64xx.h b/drivers/scsi/mvsas/mv_64xx.h
index c25a5dfe7889..749f616b21af 100644
--- a/drivers/scsi/mvsas/mv_64xx.h
+++ b/drivers/scsi/mvsas/mv_64xx.h
@@ -101,8 +101,8 @@ enum sas_sata_vsp_regs {
VSR_PHY_MODE9 = 0x09, /* Test */
VSR_PHY_MODE10 = 0x0A, /* Power */
VSR_PHY_MODE11 = 0x0B, /* Phy Mode */
- VSR_PHY_VS0 = 0x0C, /* Vednor Specific 0 */
- VSR_PHY_VS1 = 0x0D, /* Vednor Specific 1 */
+ VSR_PHY_VS0 = 0x0C, /* Vendor Specific 0 */
+ VSR_PHY_VS1 = 0x0D, /* Vendor Specific 1 */
};
enum chip_register_bits {
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index 85ff95c6543a..7618f9cc9986 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -644,7 +644,7 @@ static DEVICE_ATTR(gsm_log, S_IRUGO, pm8001_ctl_gsm_log_show, NULL);
#define FLASH_CMD_SET_NVMD 0x02
struct flash_command {
- u8 command[8];
+ u8 command[8] __nonstring;
int code;
};
diff --git a/drivers/scsi/qedi/qedi_dbg.c b/drivers/scsi/qedi/qedi_dbg.c
index 2ebef4d20b5b..2f3e044b818f 100644
--- a/drivers/scsi/qedi/qedi_dbg.c
+++ b/drivers/scsi/qedi/qedi_dbg.c
@@ -103,25 +103,3 @@ qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
ret:
va_end(va);
}
-
-int
-qedi_create_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter)
-{
- int ret = 0;
-
- for (; iter->name; iter++) {
- ret = sysfs_create_bin_file(&shost->shost_gendev.kobj,
- iter->attr);
- if (ret)
- pr_err("Unable to create sysfs %s attr, err(%d).\n",
- iter->name, ret);
- }
- return ret;
-}
-
-void
-qedi_remove_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter)
-{
- for (; iter->name; iter++)
- sysfs_remove_bin_file(&shost->shost_gendev.kobj, iter->attr);
-}
diff --git a/drivers/scsi/qedi/qedi_dbg.h b/drivers/scsi/qedi/qedi_dbg.h
index 5a1ec4542183..864951865869 100644
--- a/drivers/scsi/qedi/qedi_dbg.h
+++ b/drivers/scsi/qedi/qedi_dbg.h
@@ -87,18 +87,6 @@ void qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
void qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
u32 info, const char *fmt, ...);
-struct Scsi_Host;
-
-struct sysfs_bin_attrs {
- char *name;
- const struct bin_attribute *attr;
-};
-
-int qedi_create_sysfs_attr(struct Scsi_Host *shost,
- struct sysfs_bin_attrs *iter);
-void qedi_remove_sysfs_attr(struct Scsi_Host *shost,
- struct sysfs_bin_attrs *iter);
-
/* DebugFS related code */
struct qedi_list_of_funcs {
char *oper_str;
diff --git a/drivers/scsi/qedi/qedi_gbl.h b/drivers/scsi/qedi/qedi_gbl.h
index 772218445a56..5e10441f2e22 100644
--- a/drivers/scsi/qedi/qedi_gbl.h
+++ b/drivers/scsi/qedi/qedi_gbl.h
@@ -45,7 +45,6 @@ int qedi_iscsi_cleanup_task(struct iscsi_task *task,
void qedi_iscsi_unmap_sg_list(struct qedi_cmd *cmd);
void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
struct qedi_cmd *qedi_cmd);
-void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt);
void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, int16_t *tid);
void qedi_process_iscsi_error(struct qedi_endpoint *ep,
struct iscsi_eqe_data *data);
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index e87885cc701c..b168bb2178e9 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1877,14 +1877,6 @@ void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, s16 *tid)
WARN_ON(1);
}
-void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt)
-{
- *proto_itt = qedi->itt_map[tid].itt;
- QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
- "Get itt map tid [0x%x with proto itt[0x%x]",
- tid, *proto_itt);
-}
-
struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid)
{
struct qedi_cmd *cmd = NULL;
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 691ef827a5ab..5136549005e7 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -2706,59 +2706,6 @@ ql_dump_buffer(uint level, scsi_qla_host_t *vha, uint id, const void *buf,
}
/*
- * This function is for formatting and logging log messages.
- * It is to be used when vha is available. It formats the message
- * and logs it to the messages file. All the messages will be logged
- * irrespective of value of ql2xextended_error_logging.
- * parameters:
- * level: The level of the log messages to be printed in the
- * messages file.
- * vha: Pointer to the scsi_qla_host_t
- * id: This is a unique id for the level. It identifies the
- * part of the code from where the message originated.
- * msg: The message to be displayed.
- */
-void
-ql_log_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
- const char *fmt, ...)
-{
- va_list va;
- struct va_format vaf;
- char pbuf[128];
-
- if (level > ql_errlev)
- return;
-
- ql_ktrace(0, level, pbuf, NULL, qpair ? qpair->vha : NULL, id, fmt);
-
- if (!pbuf[0]) /* set by ql_ktrace */
- ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL,
- qpair ? qpair->vha : NULL, id);
-
- va_start(va, fmt);
-
- vaf.fmt = fmt;
- vaf.va = &va;
-
- switch (level) {
- case ql_log_fatal: /* FATAL LOG */
- pr_crit("%s%pV", pbuf, &vaf);
- break;
- case ql_log_warn:
- pr_err("%s%pV", pbuf, &vaf);
- break;
- case ql_log_info:
- pr_warn("%s%pV", pbuf, &vaf);
- break;
- default:
- pr_info("%s%pV", pbuf, &vaf);
- break;
- }
-
- va_end(va);
-}
-
-/*
* This function is for formatting and logging debug information.
* It is to be used when vha is available. It formats the message
* and logs it to the messages file.
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 54f0a412226f..5f4a8c9ae6ba 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -334,9 +334,6 @@ ql_log(uint, scsi_qla_host_t *vha, uint, const char *fmt, ...);
void __attribute__((format (printf, 4, 5)))
ql_log_pci(uint, struct pci_dev *pdev, uint, const char *fmt, ...);
-void __attribute__((format (printf, 4, 5)))
-ql_log_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...);
-
/* Debug Levels */
/* The 0x40000000 is the max value any debug level can have
* as ql2xextended_error_logging is of type signed int
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index e556f57c91af..03e50e8fc08d 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -164,10 +164,8 @@ extern int ql2xsmartsan;
extern int ql2xallocfwdump;
extern int ql2xextended_error_logging;
extern int ql2xextended_error_logging_ktrace;
-extern int ql2xiidmaenable;
extern int ql2xmqsupport;
extern int ql2xfwloadbin;
-extern int ql2xetsenable;
extern int ql2xshiftctondsd;
extern int ql2xdbwr;
extern int ql2xasynctmfenable;
@@ -720,7 +718,6 @@ extern void *qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t);
extern void *qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t);
extern int qla2x00_fdmi_register(scsi_qla_host_t *);
extern int qla2x00_gfpn_id(scsi_qla_host_t *, sw_info_t *);
-extern int qla2x00_gpsc(scsi_qla_host_t *, sw_info_t *);
extern size_t qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
extern int qla2x00_chk_ms_status(scsi_qla_host_t *, ms_iocb_entry_t *,
struct ct_sns_rsp *, const char *);
@@ -822,7 +819,6 @@ extern int qlafx00_rescan_isp(scsi_qla_host_t *);
/* PCI related functions */
extern int qla82xx_pci_config(struct scsi_qla_host *);
extern int qla82xx_pci_mem_read_2M(struct qla_hw_data *, u64, void *, int);
-extern int qla82xx_pci_region_offset(struct pci_dev *, int);
extern int qla82xx_iospace_config(struct qla_hw_data *);
/* Initialization related functions */
@@ -866,7 +862,6 @@ extern int qla82xx_rd_32(struct qla_hw_data *, ulong);
/* ISP 8021 IDC */
extern void qla82xx_clear_drv_active(struct qla_hw_data *);
-extern uint32_t qla82xx_wait_for_state_change(scsi_qla_host_t *, uint32_t);
extern int qla82xx_idc_lock(struct qla_hw_data *);
extern void qla82xx_idc_unlock(struct qla_hw_data *);
extern int qla82xx_device_state_handler(scsi_qla_host_t *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index d2bddca7045a..51c7cea71f90 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -2626,96 +2626,6 @@ qla2x00_port_speed_capability(uint16_t speed)
}
/**
- * qla2x00_gpsc() - FCS Get Port Speed Capabilities (GPSC) query.
- * @vha: HA context
- * @list: switch info entries to populate
- *
- * Returns 0 on success.
- */
-int
-qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
-{
- int rval;
- uint16_t i;
- struct qla_hw_data *ha = vha->hw;
- ms_iocb_entry_t *ms_pkt;
- struct ct_sns_req *ct_req;
- struct ct_sns_rsp *ct_rsp;
- struct ct_arg arg;
-
- if (!IS_IIDMA_CAPABLE(ha))
- return QLA_FUNCTION_FAILED;
- if (!ha->flags.gpsc_supported)
- return QLA_FUNCTION_FAILED;
-
- rval = qla2x00_mgmt_svr_login(vha);
- if (rval)
- return rval;
-
- arg.iocb = ha->ms_iocb;
- arg.req_dma = ha->ct_sns_dma;
- arg.rsp_dma = ha->ct_sns_dma;
- arg.req_size = GPSC_REQ_SIZE;
- arg.rsp_size = GPSC_RSP_SIZE;
- arg.nport_handle = vha->mgmt_svr_loop_id;
-
- for (i = 0; i < ha->max_fibre_devices; i++) {
- /* Issue GFPN_ID */
- /* Prepare common MS IOCB */
- ms_pkt = qla24xx_prep_ms_iocb(vha, &arg);
-
- /* Prepare CT request */
- ct_req = qla24xx_prep_ct_fm_req(ha->ct_sns, GPSC_CMD,
- GPSC_RSP_SIZE);
- ct_rsp = &ha->ct_sns->p.rsp;
-
- /* Prepare CT arguments -- port_name */
- memcpy(ct_req->req.gpsc.port_name, list[i].fabric_port_name,
- WWN_SIZE);
-
- /* Execute MS IOCB */
- rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
- sizeof(ms_iocb_entry_t));
- if (rval != QLA_SUCCESS) {
- /*EMPTY*/
- ql_dbg(ql_dbg_disc, vha, 0x2059,
- "GPSC issue IOCB failed (%d).\n", rval);
- } else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
- "GPSC")) != QLA_SUCCESS) {
- /* FM command unsupported? */
- if (rval == QLA_INVALID_COMMAND &&
- (ct_rsp->header.reason_code ==
- CT_REASON_INVALID_COMMAND_CODE ||
- ct_rsp->header.reason_code ==
- CT_REASON_COMMAND_UNSUPPORTED)) {
- ql_dbg(ql_dbg_disc, vha, 0x205a,
- "GPSC command unsupported, disabling "
- "query.\n");
- ha->flags.gpsc_supported = 0;
- rval = QLA_FUNCTION_FAILED;
- break;
- }
- rval = QLA_FUNCTION_FAILED;
- } else {
- list->fp_speed = qla2x00_port_speed_capability(
- be16_to_cpu(ct_rsp->rsp.gpsc.speed));
- ql_dbg(ql_dbg_disc, vha, 0x205b,
- "GPSC ext entry - fpn "
- "%8phN speeds=%04x speed=%04x.\n",
- list[i].fabric_port_name,
- be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
- be16_to_cpu(ct_rsp->rsp.gpsc.speed));
- }
-
- /* Last device exit. */
- if (list[i].d_id.b.rsvd_1 != 0)
- break;
- }
-
- return (rval);
-}
-
-/**
* qla2x00_gff_id() - SNS Get FC-4 Features (GFF_ID) query.
*
* @vha: HA context
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 6dfb70edb9a6..0cd3db8ed4ef 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1099,11 +1099,6 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
unsigned offset, n;
struct qla_hw_data *ha = vha->hw;
- struct crb_addr_pair {
- long addr;
- long data;
- };
-
/* Halt all the individual PEGs and other blocks of the ISP */
qla82xx_rom_lock(ha);
@@ -1595,25 +1590,6 @@ qla82xx_get_fw_offs(struct qla_hw_data *ha)
return (u8 *)&ha->hablob->fw->data[offset];
}
-/* PCI related functions */
-int qla82xx_pci_region_offset(struct pci_dev *pdev, int region)
-{
- unsigned long val = 0;
- u32 control;
-
- switch (region) {
- case 0:
- val = 0;
- break;
- case 1:
- pci_read_config_dword(pdev, QLA82XX_PCI_REG_MSIX_TBL, &control);
- val = control + QLA82XX_MSIX_TBL_SPACE;
- break;
- }
- return val;
-}
-
-
int
qla82xx_iospace_config(struct qla_hw_data *ha)
{
@@ -2934,32 +2910,6 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
}
}
-/*
-* qla82xx_wait_for_state_change
-* Wait for device state to change from given current state
-*
-* Note:
-* IDC lock must not be held upon entry
-*
-* Return:
-* Changed device state.
-*/
-uint32_t
-qla82xx_wait_for_state_change(scsi_qla_host_t *vha, uint32_t curr_state)
-{
- struct qla_hw_data *ha = vha->hw;
- uint32_t dev_state;
-
- do {
- msleep(1000);
- qla82xx_idc_lock(ha);
- dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
- qla82xx_idc_unlock(ha);
- } while (dev_state == curr_state);
-
- return dev_state;
-}
-
void
qla8xxx_dev_failed_handler(scsi_qla_host_t *vha)
{
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index b44d134e7105..288ce04fc2b1 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -176,12 +176,6 @@ MODULE_PARM_DESC(ql2xenablehba_err_chk,
" 1 -- Error isolation enabled only for DIX Type 0\n"
" 2 -- Error isolation enabled for all Types\n");
-int ql2xiidmaenable = 1;
-module_param(ql2xiidmaenable, int, S_IRUGO);
-MODULE_PARM_DESC(ql2xiidmaenable,
- "Enables iIDMA settings "
- "Default is 1 - perform iIDMA. 0 - no iIDMA.");
-
int ql2xmqsupport = 1;
module_param(ql2xmqsupport, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmqsupport,
@@ -199,12 +193,6 @@ MODULE_PARM_DESC(ql2xfwloadbin,
" 1 -- load firmware from flash.\n"
" 0 -- use default semantics.\n");
-int ql2xetsenable;
-module_param(ql2xetsenable, int, S_IRUGO);
-MODULE_PARM_DESC(ql2xetsenable,
- "Enables firmware ETS burst."
- "Default is 0 - skip ETS enablement.");
-
int ql2xdbwr = 1;
module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xdbwr,
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 11eadb3bd36e..1e81582085e3 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1454,50 +1454,6 @@ static struct fc_port *qlt_create_sess(
return sess;
}
-/*
- * max_gen - specifies maximum session generation
- * at which this deletion requestion is still valid
- */
-void
-qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
-{
- struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
- struct fc_port *sess = fcport;
- unsigned long flags;
-
- if (!vha->hw->tgt.tgt_ops)
- return;
-
- if (!tgt)
- return;
-
- spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
- if (tgt->tgt_stop) {
- spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
- return;
- }
- if (!sess->se_sess) {
- spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
- return;
- }
-
- if (max_gen - sess->generation < 0) {
- spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
- "Ignoring stale deletion request for se_sess %p / sess %p"
- " for port %8phC, req_gen %d, sess_gen %d\n",
- sess->se_sess, sess, sess->port_name, max_gen,
- sess->generation);
- return;
- }
-
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);
-
- sess->local = 1;
- spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
- qlt_schedule_sess_for_deletion(sess);
-}
-
static inline int test_tgt_sess_count(struct qla_tgt *tgt)
{
struct qla_hw_data *ha = tgt->ha;
@@ -5539,81 +5495,6 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}
-int
-qlt_free_qfull_cmds(struct qla_qpair *qpair)
-{
- struct scsi_qla_host *vha = qpair->vha;
- struct qla_hw_data *ha = vha->hw;
- unsigned long flags;
- struct qla_tgt_cmd *cmd, *tcmd;
- struct list_head free_list, q_full_list;
- int rc = 0;
-
- if (list_empty(&ha->tgt.q_full_list))
- return 0;
-
- INIT_LIST_HEAD(&free_list);
- INIT_LIST_HEAD(&q_full_list);
-
- spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
- if (list_empty(&ha->tgt.q_full_list)) {
- spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
- return 0;
- }
-
- list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list);
- spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
-
- spin_lock_irqsave(qpair->qp_lock_ptr, flags);
- list_for_each_entry_safe(cmd, tcmd, &q_full_list, cmd_list) {
- if (cmd->q_full)
- /* cmd->state is a borrowed field to hold status */
- rc = __qlt_send_busy(qpair, &cmd->atio, cmd->state);
- else if (cmd->term_exchg)
- rc = __qlt_send_term_exchange(qpair, NULL, &cmd->atio);
-
- if (rc == -ENOMEM)
- break;
-
- if (cmd->q_full)
- ql_dbg(ql_dbg_io, vha, 0x3006,
- "%s: busy sent for ox_id[%04x]\n", __func__,
- be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
- else if (cmd->term_exchg)
- ql_dbg(ql_dbg_io, vha, 0x3007,
- "%s: Term exchg sent for ox_id[%04x]\n", __func__,
- be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
- else
- ql_dbg(ql_dbg_io, vha, 0x3008,
- "%s: Unexpected cmd in QFull list %p\n", __func__,
- cmd);
-
- list_move_tail(&cmd->cmd_list, &free_list);
-
- /* piggy back on hardware_lock for protection */
- vha->hw->tgt.num_qfull_cmds_alloc--;
- }
- spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
-
- cmd = NULL;
-
- list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
- list_del(&cmd->cmd_list);
- /* This cmd was never sent to TCM. There is no need
- * to schedule free or call free_cmd
- */
- qlt_free_cmd(cmd);
- }
-
- if (!list_empty(&q_full_list)) {
- spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
- list_splice(&q_full_list, &vha->hw->tgt.q_full_list);
- spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
- }
-
- return rc;
-}
-
static void
qlt_send_busy(struct qla_qpair *qpair, struct atio_from_isp *atio,
uint16_t status)
@@ -7091,16 +6972,6 @@ qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
}
void
-qlt_83xx_iospace_config(struct qla_hw_data *ha)
-{
- if (!QLA_TGT_MODE_ENABLED())
- return;
-
- ha->msix_count += 1; /* For ATIO Q */
-}
-
-
-void
qlt_modify_vp_config(struct scsi_qla_host *vha,
struct vp_config_entry_24xx *vpmod)
{
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 354fca2e7feb..15a59c125c53 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -1014,7 +1014,6 @@ extern int qlt_lport_register(void *, u64, u64, u64,
extern void qlt_lport_deregister(struct scsi_qla_host *);
extern void qlt_unreg_sess(struct fc_port *);
extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
-extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int);
extern int __init qlt_init(void);
extern void qlt_exit(void);
extern void qlt_free_session_done(struct work_struct *);
@@ -1082,8 +1081,6 @@ extern void qlt_mem_free(struct qla_hw_data *);
extern int qlt_stop_phase1(struct qla_tgt *);
extern void qlt_stop_phase2(struct qla_tgt *);
extern irqreturn_t qla83xx_msix_atio_q(int, void *);
-extern void qlt_83xx_iospace_config(struct qla_hw_data *);
-extern int qlt_free_qfull_cmds(struct qla_qpair *);
extern void qlt_logo_completion_handler(fc_port_t *, int);
extern void qlt_do_generation_tick(struct scsi_qla_host *, int *);
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 47adff9f0506..da2fc66ffedd 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -973,11 +973,6 @@ qla4_82xx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
unsigned long off;
unsigned offset, n;
- struct crb_addr_pair {
- long addr;
- long data;
- };
-
 /* Halt all the individual PEGs and other blocks of the ISP */
qla4_82xx_rom_lock(ha);
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index f0eec4708ddd..aef33d1e346a 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -162,7 +162,7 @@ static const char *sdebug_version_date = "20210520";
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DEF_ATOMIC_WR 0
-#define DEF_ATOMIC_WR_MAX_LENGTH 8192
+#define DEF_ATOMIC_WR_MAX_LENGTH 128
#define DEF_ATOMIC_WR_ALIGN 2
#define DEF_ATOMIC_WR_GRAN 2
#define DEF_ATOMIC_WR_MAX_LENGTH_BNDRY (DEF_ATOMIC_WR_MAX_LENGTH)
@@ -294,6 +294,14 @@ struct tape_block {
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY)
+/* Device selection bit mask */
+#define DS_ALL 0xffffffff
+#define DS_SBC (1 << TYPE_DISK)
+#define DS_SSC (1 << TYPE_TAPE)
+#define DS_ZBC (1 << TYPE_ZBC)
+
+#define DS_NO_SSC (DS_ALL & ~DS_SSC)
+
#define SDEBUG_MAX_PARTS 4
#define SDEBUG_MAX_CMD_LEN 32
@@ -472,6 +480,7 @@ struct opcode_info_t {
/* for terminating element */
u8 opcode; /* if num_attached > 0, preferred */
u16 sa; /* service action */
+ u32 devsel; /* device type mask for this definition */
u32 flags; /* OR-ed set of SDEB_F_* */
int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
const struct opcode_info_t *arrp; /* num_attached elements or NULL */
@@ -519,7 +528,8 @@ enum sdeb_opcode_index {
SDEB_I_WRITE_FILEMARKS = 35,
SDEB_I_SPACE = 36,
SDEB_I_FORMAT_MEDIUM = 37,
- SDEB_I_LAST_ELEM_P1 = 38, /* keep this last (previous + 1) */
+ SDEB_I_ERASE = 38,
+ SDEB_I_LAST_ELEM_P1 = 39, /* keep this last (previous + 1) */
};
@@ -530,7 +540,7 @@ static const unsigned char opcode_ind_arr[256] = {
SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
SDEB_I_WRITE_FILEMARKS, SDEB_I_SPACE, SDEB_I_INQUIRY, 0, 0,
SDEB_I_MODE_SELECT, SDEB_I_RESERVE, SDEB_I_RELEASE,
- 0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
+ 0, SDEB_I_ERASE, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
@@ -585,7 +595,9 @@ static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_read_tape(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_write_tape(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
@@ -613,8 +625,10 @@ static int resp_read_blklimits(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_locate(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_filemarks(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_space(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_read_position(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rewind(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_format_medium(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_erase(struct scsi_cmnd *, struct sdebug_dev_info *);
static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
@@ -629,113 +643,121 @@ static void sdebug_erase_all_stores(bool apart_from_first);
* should be placed in opcode_info_arr[], the others should be placed here.
*/
static const struct opcode_info_t msense_iarr[] = {
- {0, 0x1a, 0, F_D_IN, NULL, NULL,
+ {0, 0x1a, 0, DS_ALL, F_D_IN, NULL, NULL,
{6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
static const struct opcode_info_t mselect_iarr[] = {
- {0, 0x15, 0, F_D_OUT, NULL, NULL,
+ {0, 0x15, 0, DS_ALL, F_D_OUT, NULL, NULL,
{6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
static const struct opcode_info_t read_iarr[] = {
- {0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
+ {0, 0x28, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
0, 0, 0, 0} },
- {0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
+ {0, 0x8, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) disk */
{6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
+ {0, 0x8, 0, DS_SSC, F_D_IN | FF_MEDIA_IO, resp_read_tape, NULL, /* READ(6) tape */
+ {6, 0x03, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {0, 0xa8, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
{12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
0xc7, 0, 0, 0, 0} },
};
static const struct opcode_info_t write_iarr[] = {
- {0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(10) */
+ {0, 0x2a, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(10) */
NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
0, 0, 0, 0, 0, 0} },
- {0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(6) */
+ {0, 0xa, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(6) disk */
NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0} },
- {0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(12) */
+ {0, 0xa, 0, DS_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_tape, /* WRITE(6) tape */
+ NULL, {6, 0x01, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0} },
+ {0, 0xaa, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(12) */
NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xbf, 0xc7, 0, 0, 0, 0} },
};
static const struct opcode_info_t verify_iarr[] = {
- {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
+ {0, 0x2f, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
0, 0, 0, 0, 0, 0} },
};
static const struct opcode_info_t sa_in_16_iarr[] = {
- {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
+ {0, 0x9e, 0x12, DS_NO_SSC, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
{16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0, 0xc7} }, /* GET LBA STATUS(16) */
- {0, 0x9e, 0x16, F_SA_LOW | F_D_IN, resp_get_stream_status, NULL,
+ {0, 0x9e, 0x16, DS_NO_SSC, F_SA_LOW | F_D_IN, resp_get_stream_status, NULL,
{16, 0x16, 0, 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff,
0, 0} }, /* GET STREAM STATUS */
};
static const struct opcode_info_t vl_iarr[] = { /* VARIABLE LENGTH */
- {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
+ {0, 0x7f, 0xb, DS_NO_SSC, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */
- {0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
+ {0, 0x7f, 0x11, DS_NO_SSC, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
0, 0xff, 0xff, 0x0, 0x0} }, /* WRITE SCATTERED(32) */
};
static const struct opcode_info_t maint_in_iarr[] = { /* MAINT IN */
- {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
+ {0, 0xa3, 0xc, DS_ALL, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
{12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
- {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
+ {0, 0xa3, 0xd, DS_ALL, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
{12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
0, 0} }, /* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
};
static const struct opcode_info_t write_same_iarr[] = {
- {0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
+ {0, 0x93, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
{16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* WRITE SAME(16) */
};
static const struct opcode_info_t reserve_iarr[] = {
- {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */
+ {0, 0x16, 0, DS_ALL, F_D_OUT, NULL, NULL, /* RESERVE(6) */
{6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
static const struct opcode_info_t release_iarr[] = {
- {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */
+ {0, 0x17, 0, DS_ALL, F_D_OUT, NULL, NULL, /* RELEASE(6) */
{6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
static const struct opcode_info_t sync_cache_iarr[] = {
- {0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
+ {0, 0x91, 0, DS_NO_SSC, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
{16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* SYNC_CACHE (16) */
};
static const struct opcode_info_t pre_fetch_iarr[] = {
- {0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
+ {0, 0x90, 0, DS_NO_SSC, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
{16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* PRE-FETCH (16) */
+ {0, 0x34, 0, DS_SSC, F_SYNC_DELAY | FF_MEDIA_IO, resp_read_position, NULL,
+ {10, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc7, 0, 0,
+ 0, 0, 0, 0} }, /* READ POSITION (10) */
};
static const struct opcode_info_t zone_out_iarr[] = { /* ZONE OUT(16) */
- {0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
+ {0, 0x94, 0x1, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
{16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* CLOSE ZONE */
- {0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
+ {0, 0x94, 0x2, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
{16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* FINISH ZONE */
- {0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
+ {0, 0x94, 0x4, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
{16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* RESET WRITE POINTER */
};
static const struct opcode_info_t zone_in_iarr[] = { /* ZONE IN(16) */
- {0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
+ {0, 0x95, 0x6, DS_NO_SSC, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
{16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
};
@@ -746,130 +768,132 @@ static const struct opcode_info_t zone_in_iarr[] = { /* ZONE IN(16) */
* REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
- {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */
+ {0, 0, 0, DS_ALL, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
+ {0, 0x12, 0, DS_ALL, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
{6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
+ {0, 0xa0, 0, DS_ALL, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
{12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
0, 0} }, /* REPORT LUNS */
- {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
+ {0, 0x3, 0, DS_ALL, FF_RESPOND | F_D_IN, resp_requests, NULL,
{6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
+ {0, 0x0, 0, DS_ALL, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
{6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
- {ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN, /* MODE SENSE(10) */
+ {ARRAY_SIZE(msense_iarr), 0x5a, 0, DS_ALL, F_D_IN, /* MODE SENSE(10) */
resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
- {ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT, /* MODE SELECT(10) */
+ {ARRAY_SIZE(mselect_iarr), 0x55, 0, DS_ALL, F_D_OUT, /* MODE SELECT(10) */
resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
- {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL, /* LOG SENSE */
+ {0, 0x4d, 0, DS_NO_SSC, F_D_IN, resp_log_sense, NULL, /* LOG SENSE */
{10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
0, 0, 0} },
- {0, 0x25, 0, F_D_IN, resp_readcap, NULL, /* READ CAPACITY(10) */
+ {0, 0x25, 0, DS_NO_SSC, F_D_IN, resp_readcap, NULL, /* READ CAPACITY(10) */
{10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
0, 0} },
- {ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
+ {ARRAY_SIZE(read_iarr), 0x88, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, /* READ(16) */
resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
- {ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
+ {ARRAY_SIZE(write_iarr), 0x8a, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO,
resp_write_dt0, write_iarr, /* WRITE(16) */
{16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
- {0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
+ {0, 0x1b, 0, DS_ALL, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
{6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
+ {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, DS_NO_SSC, F_SA_LOW | F_D_IN,
resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
{16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
- {0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
+ {0, 0x9f, 0x12, DS_NO_SSC, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xc7} }, /* SA_OUT(16), WRITE SCAT(16) */
- {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
+ {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, DS_ALL, F_SA_LOW | F_D_IN,
resp_report_tgtpgs, /* MAINT IN, REPORT TARGET PORT GROUPS */
maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
- {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
+ {0, 0, 0, DS_ALL, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {ARRAY_SIZE(verify_iarr), 0x8f, 0,
+ {ARRAY_SIZE(verify_iarr), 0x8f, 0, DS_NO_SSC,
F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify, /* VERIFY(16) */
verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
- {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
+ {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, DS_NO_SSC, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
resp_read_dt0, vl_iarr, /* VARIABLE LENGTH, READ(32) */
{32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
0xff, 0xff} },
- {ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
+ {ARRAY_SIZE(reserve_iarr), 0x56, 0, DS_ALL, F_D_OUT,
NULL, reserve_iarr, /* RESERVE(10) <no response function> */
{10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
0} },
- {ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
+ {ARRAY_SIZE(release_iarr), 0x57, 0, DS_ALL, F_D_OUT,
NULL, release_iarr, /* RELEASE(10) <no response function> */
{10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
0} },
/* 20 */
- {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
+ {0, 0x1e, 0, DS_ALL, 0, NULL, NULL, /* ALLOW REMOVAL */
{6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x1, 0, 0, resp_rewind, NULL,
+ {0, 0x1, 0, DS_SSC, 0, resp_rewind, NULL,
{6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
+ {0, 0, 0, DS_NO_SSC, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x1d, F_D_OUT, 0, NULL, NULL, /* SEND DIAGNOSTIC */
+ {0, 0x1d, 0, DS_ALL, F_D_OUT, NULL, NULL, /* SEND DIAGNOSTIC */
{6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
+ {0, 0x42, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
{10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
- {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
+ {0, 0x3b, 0, DS_NO_SSC, F_D_OUT_MAYBE, resp_write_buffer, NULL,
{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
0, 0, 0, 0} }, /* WRITE_BUFFER */
- {ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
+ {ARRAY_SIZE(write_same_iarr), 0x41, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO,
resp_write_same_10, write_same_iarr, /* WRITE SAME(10) */
{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
0, 0, 0, 0, 0} },
- {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
+ {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, DS_NO_SSC, F_SYNC_DELAY | F_M_ACCESS,
resp_sync_cache, sync_cache_iarr,
{10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
0, 0, 0, 0} }, /* SYNC_CACHE (10) */
- {0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
+ {0, 0x89, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
{16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
0, 0xff, 0x3f, 0xc7} }, /* COMPARE AND WRITE */
- {ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
+ {ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, DS_NO_SSC, F_SYNC_DELAY | FF_MEDIA_IO,
resp_pre_fetch, pre_fetch_iarr,
{10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
0, 0, 0, 0} }, /* PRE-FETCH (10) */
/* READ POSITION (10) */
/* 30 */
- {ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
+ {ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, DS_NO_SSC, F_SA_LOW | F_M_ACCESS,
resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
{16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
- {ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
+ {ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, DS_NO_SSC, F_SA_LOW | F_M_ACCESS,
resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
{16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
/* 32 */
- {0, 0x0, 0x0, F_D_OUT | FF_MEDIA_IO,
+ {0, 0x9c, 0x0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO,
resp_atomic_write, NULL, /* ATOMIC WRITE 16 */
{16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} },
- {0, 0x05, 0, F_D_IN, resp_read_blklimits, NULL, /* READ BLOCK LIMITS (6) */
+ {0, 0x05, 0, DS_SSC, F_D_IN, resp_read_blklimits, NULL, /* READ BLOCK LIMITS (6) */
{6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x2b, 0, F_D_UNKN, resp_locate, NULL, /* LOCATE (10) */
+ {0, 0x2b, 0, DS_SSC, F_D_UNKN, resp_locate, NULL, /* LOCATE (10) */
{10, 0x07, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xc7, 0, 0,
0, 0, 0, 0} },
- {0, 0x10, 0, F_D_IN, resp_write_filemarks, NULL, /* WRITE FILEMARKS (6) */
+ {0, 0x10, 0, DS_SSC, F_D_IN, resp_write_filemarks, NULL, /* WRITE FILEMARKS (6) */
{6, 0x01, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x11, 0, F_D_IN, resp_space, NULL, /* SPACE (6) */
+ {0, 0x11, 0, DS_SSC, F_D_IN, resp_space, NULL, /* SPACE (6) */
{6, 0x07, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x4, 0, 0, resp_format_medium, NULL, /* FORMAT MEDIUM (6) */
+ {0, 0x4, 0, DS_SSC, 0, resp_format_medium, NULL, /* FORMAT MEDIUM (6) */
{6, 0x3, 0x7, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-/* 38 */
+ {0, 0x19, 0, DS_SSC, F_D_IN, resp_erase, NULL, /* ERASE (6) */
+ {6, 0x03, 0x33, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+/* 39 */
/* sentinel */
- {0xff, 0, 0, 0, NULL, NULL, /* terminating element */
+ {0xff, 0, 0, 0, 0, NULL, NULL, /* terminating element */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
@@ -1015,6 +1039,19 @@ static const int condition_met_result = SAM_STAT_CONDITION_MET;
static struct dentry *sdebug_debugfs_root;
static ASYNC_DOMAIN_EXCLUSIVE(sdebug_async_domain);
+static u32 sdebug_get_devsel(struct scsi_device *sdp)
+{
+ unsigned char devtype = sdp->type;
+ u32 devsel;
+
+ if (devtype < 32)
+ devsel = (1 << devtype);
+ else
+ devsel = DS_ALL;
+
+ return devsel;
+}
+
static void sdebug_err_free(struct rcu_head *head)
{
struct sdebug_err_inject *inject =
@@ -2032,13 +2069,19 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
unsigned char *cmd = scp->cmnd;
u32 alloc_len, n;
int ret;
- bool have_wlun, is_disk, is_zbc, is_disk_zbc;
+ bool have_wlun, is_disk, is_zbc, is_disk_zbc, is_tape;
alloc_len = get_unaligned_be16(cmd + 3);
arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
if (! arr)
return DID_REQUEUE << 16;
- is_disk = (sdebug_ptype == TYPE_DISK);
+ if (scp->device->type >= 32) {
+ is_disk = (sdebug_ptype == TYPE_DISK);
+ is_tape = (sdebug_ptype == TYPE_TAPE);
+ } else {
+ is_disk = (scp->device->type == TYPE_DISK);
+ is_tape = (scp->device->type == TYPE_TAPE);
+ }
is_zbc = devip->zoned;
is_disk_zbc = (is_disk || is_zbc);
have_wlun = scsi_is_wlun(scp->device->lun);
@@ -2047,7 +2090,8 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
pq_pdt = 0x7f; /* not present, PQ=3, PDT=0x1f */
else
- pq_pdt = (sdebug_ptype & 0x1f);
+ pq_pdt = ((scp->device->type >= 32 ?
+ sdebug_ptype : scp->device->type) & 0x1f);
arr[0] = pq_pdt;
if (0x2 & cmd[1]) { /* CMDDT bit set */
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
@@ -2170,7 +2214,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
if (is_disk) { /* SBC-4 no version claimed */
put_unaligned_be16(0x600, arr + n);
n += 2;
- } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
+ } else if (is_tape) { /* SSC-4 rev 3 */
put_unaligned_be16(0x525, arr + n);
n += 2;
} else if (is_zbc) { /* ZBC BSR INCITS 536 revision 05 */
@@ -2279,7 +2323,7 @@ static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
changing = (stopped_state != want_stop);
if (changing)
atomic_xchg(&devip->stopped, want_stop);
- if (sdebug_ptype == TYPE_TAPE && !want_stop) {
+ if (scp->device->type == TYPE_TAPE && !want_stop) {
int i;
set_bit(SDEBUG_UA_NOT_READY_TO_READY, devip->uas_bm); /* not legal! */
@@ -2454,11 +2498,12 @@ static int resp_rsup_opcodes(struct scsi_cmnd *scp,
u8 reporting_opts, req_opcode, sdeb_i, supp;
u16 req_sa, u;
u32 alloc_len, a_len;
- int k, offset, len, errsts, count, bump, na;
+ int k, offset, len, errsts, bump, na;
const struct opcode_info_t *oip;
const struct opcode_info_t *r_oip;
u8 *arr;
u8 *cmd = scp->cmnd;
+ u32 devsel = sdebug_get_devsel(scp->device);
rctd = !!(cmd[2] & 0x80);
reporting_opts = cmd[2] & 0x7;
@@ -2481,34 +2526,30 @@ static int resp_rsup_opcodes(struct scsi_cmnd *scp,
}
switch (reporting_opts) {
case 0: /* all commands */
- /* count number of commands */
- for (count = 0, oip = opcode_info_arr;
- oip->num_attached != 0xff; ++oip) {
- if (F_INV_OP & oip->flags)
- continue;
- count += (oip->num_attached + 1);
- }
bump = rctd ? 20 : 8;
- put_unaligned_be32(count * bump, arr);
for (offset = 4, oip = opcode_info_arr;
oip->num_attached != 0xff && offset < a_len; ++oip) {
if (F_INV_OP & oip->flags)
continue;
+ if ((devsel & oip->devsel) != 0) {
+ arr[offset] = oip->opcode;
+ put_unaligned_be16(oip->sa, arr + offset + 2);
+ if (rctd)
+ arr[offset + 5] |= 0x2;
+ if (FF_SA & oip->flags)
+ arr[offset + 5] |= 0x1;
+ put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
+ if (rctd)
+ put_unaligned_be16(0xa, arr + offset + 8);
+ offset += bump;
+ }
na = oip->num_attached;
- arr[offset] = oip->opcode;
- put_unaligned_be16(oip->sa, arr + offset + 2);
- if (rctd)
- arr[offset + 5] |= 0x2;
- if (FF_SA & oip->flags)
- arr[offset + 5] |= 0x1;
- put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
- if (rctd)
- put_unaligned_be16(0xa, arr + offset + 8);
r_oip = oip;
for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
if (F_INV_OP & oip->flags)
continue;
- offset += bump;
+ if ((devsel & oip->devsel) == 0)
+ continue;
arr[offset] = oip->opcode;
put_unaligned_be16(oip->sa, arr + offset + 2);
if (rctd)
@@ -2516,14 +2557,15 @@ static int resp_rsup_opcodes(struct scsi_cmnd *scp,
if (FF_SA & oip->flags)
arr[offset + 5] |= 0x1;
put_unaligned_be16(oip->len_mask[0],
- arr + offset + 6);
+ arr + offset + 6);
if (rctd)
put_unaligned_be16(0xa,
arr + offset + 8);
+ offset += bump;
}
oip = r_oip;
- offset += bump;
}
+ put_unaligned_be32(offset - 4, arr);
break;
case 1: /* one command: opcode only */
case 2: /* one command: opcode plus service action */
@@ -2549,13 +2591,15 @@ static int resp_rsup_opcodes(struct scsi_cmnd *scp,
return check_condition_result;
}
if (0 == (FF_SA & oip->flags) &&
- req_opcode == oip->opcode)
+ (devsel & oip->devsel) != 0 &&
+ req_opcode == oip->opcode)
supp = 3;
else if (0 == (FF_SA & oip->flags)) {
na = oip->num_attached;
for (k = 0, oip = oip->arrp; k < na;
++k, ++oip) {
- if (req_opcode == oip->opcode)
+ if (req_opcode == oip->opcode &&
+ (devsel & oip->devsel) != 0)
break;
}
supp = (k >= na) ? 1 : 3;
@@ -2563,7 +2607,8 @@ static int resp_rsup_opcodes(struct scsi_cmnd *scp,
na = oip->num_attached;
for (k = 0, oip = oip->arrp; k < na;
++k, ++oip) {
- if (req_sa == oip->sa)
+ if (req_sa == oip->sa &&
+ (devsel & oip->devsel) != 0)
break;
}
supp = (k >= na) ? 1 : 3;
@@ -2914,9 +2959,9 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
subpcode = cmd[3];
msense_6 = (MODE_SENSE == cmd[0]);
llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
- is_disk = (sdebug_ptype == TYPE_DISK);
+ is_disk = (scp->device->type == TYPE_DISK);
is_zbc = devip->zoned;
- is_tape = (sdebug_ptype == TYPE_TAPE);
+ is_tape = (scp->device->type == TYPE_TAPE);
if ((is_disk || is_zbc || is_tape) && !dbd)
bd_len = llbaa ? 16 : 8;
else
@@ -3131,7 +3176,7 @@ static int resp_mode_select(struct scsi_cmnd *scp,
md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
off = (mselect6 ? 4 : 8);
- if (sdebug_ptype == TYPE_TAPE) {
+ if (scp->device->type == TYPE_TAPE) {
int blksize;
if (bd_len != 8) {
@@ -3196,7 +3241,7 @@ static int resp_mode_select(struct scsi_cmnd *scp,
}
break;
case 0xf: /* Compression mode page */
- if (sdebug_ptype != TYPE_TAPE)
+ if (scp->device->type != TYPE_TAPE)
goto bad_pcode;
if ((arr[off + 2] & 0x40) != 0) {
devip->tape_dce = (arr[off + 2] & 0x80) != 0;
@@ -3204,7 +3249,7 @@ static int resp_mode_select(struct scsi_cmnd *scp,
}
break;
case 0x11: /* Medium Partition Mode Page (tape) */
- if (sdebug_ptype == TYPE_TAPE) {
+ if (scp->device->type == TYPE_TAPE) {
int fld;
fld = process_medium_part_m_pg(devip, &arr[off], pg_len);
@@ -3563,6 +3608,30 @@ is_eop:
return check_condition_result;
}
+enum {SDEBUG_READ_POSITION_ARR_SZ = 20};
+static int resp_read_position(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ u8 *cmd = scp->cmnd;
+ int all_length;
+ unsigned char arr[20];
+ unsigned int pos;
+
+ all_length = get_unaligned_be16(cmd + 7);
+ if ((cmd[1] & 0xfe) != 0 ||
+ all_length != 0) { /* only short form */
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB,
+ all_length ? 7 : 1, 0);
+ return check_condition_result;
+ }
+ memset(arr, 0, SDEBUG_READ_POSITION_ARR_SZ);
+ arr[1] = devip->tape_partition;
+ pos = devip->tape_location[devip->tape_partition];
+ put_unaligned_be32(pos, arr + 4);
+ put_unaligned_be32(pos, arr + 8);
+ return fill_from_dev_buffer(scp, arr, SDEBUG_READ_POSITION_ARR_SZ);
+}
+
static int resp_rewind(struct scsi_cmnd *scp,
struct sdebug_dev_info *devip)
{
@@ -3604,10 +3673,6 @@ static int resp_format_medium(struct scsi_cmnd *scp,
int res = 0;
unsigned char *cmd = scp->cmnd;
- if (sdebug_ptype != TYPE_TAPE) {
- mk_sense_invalid_fld(scp, SDEB_IN_CDB, 0, -1);
- return check_condition_result;
- }
if (cmd[2] > 2) {
mk_sense_invalid_fld(scp, SDEB_IN_DATA, 2, -1);
return check_condition_result;
@@ -3631,6 +3696,19 @@ static int resp_format_medium(struct scsi_cmnd *scp,
return 0;
}
+static int resp_erase(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ int partition = devip->tape_partition;
+ int pos = devip->tape_location[partition];
+ struct tape_block *blp;
+
+ blp = devip->tape_blocks[partition] + pos;
+ blp->fl_size = TAPE_BLOCK_EOD_FLAG;
+
+ return 0;
+}
+
static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
{
return devip->nr_zones != 0;
@@ -4467,9 +4545,6 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
u8 *cmd = scp->cmnd;
bool meta_data_locked = false;
- if (sdebug_ptype == TYPE_TAPE)
- return resp_read_tape(scp, devip);
-
switch (cmd[0]) {
case READ_16:
ei_lba = 0;
@@ -4839,9 +4914,6 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
u8 *cmd = scp->cmnd;
bool meta_data_locked = false;
- if (sdebug_ptype == TYPE_TAPE)
- return resp_write_tape(scp, devip);
-
switch (cmd[0]) {
case WRITE_16:
ei_lba = 0;
@@ -5573,7 +5645,6 @@ static int resp_sync_cache(struct scsi_cmnd *scp,
*
* The pcode 0x34 is also used for READ POSITION by tape devices.
*/
-enum {SDEBUG_READ_POSITION_ARR_SZ = 20};
static int resp_pre_fetch(struct scsi_cmnd *scp,
struct sdebug_dev_info *devip)
{
@@ -5585,31 +5656,6 @@ static int resp_pre_fetch(struct scsi_cmnd *scp,
struct sdeb_store_info *sip = devip2sip(devip, true);
u8 *fsp = sip->storep;
- if (sdebug_ptype == TYPE_TAPE) {
- if (cmd[0] == PRE_FETCH) { /* READ POSITION (10) */
- int all_length;
- unsigned char arr[20];
- unsigned int pos;
-
- all_length = get_unaligned_be16(cmd + 7);
- if ((cmd[1] & 0xfe) != 0 ||
- all_length != 0) { /* only short form */
- mk_sense_invalid_fld(scp, SDEB_IN_CDB,
- all_length ? 7 : 1, 0);
- return check_condition_result;
- }
- memset(arr, 0, SDEBUG_READ_POSITION_ARR_SZ);
- arr[1] = devip->tape_partition;
- pos = devip->tape_location[devip->tape_partition];
- put_unaligned_be32(pos, arr + 4);
- put_unaligned_be32(pos, arr + 8);
- return fill_from_dev_buffer(scp, arr,
- SDEBUG_READ_POSITION_ARR_SZ);
- }
- mk_sense_invalid_opcode(scp);
- return check_condition_result;
- }
-
if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
lba = get_unaligned_be32(cmd + 2);
nblks = get_unaligned_be16(cmd + 7);
@@ -6645,7 +6691,7 @@ static void scsi_debug_sdev_destroy(struct scsi_device *sdp)
debugfs_remove(devip->debugfs_entry);
- if (sdebug_ptype == TYPE_TAPE) {
+ if (sdp->type == TYPE_TAPE) {
kfree(devip->tape_blocks[0]);
devip->tape_blocks[0] = NULL;
}
@@ -6833,18 +6879,16 @@ static int sdebug_fail_lun_reset(struct scsi_cmnd *cmnd)
static void scsi_tape_reset_clear(struct sdebug_dev_info *devip)
{
- if (sdebug_ptype == TYPE_TAPE) {
- int i;
+ int i;
- devip->tape_blksize = TAPE_DEF_BLKSIZE;
- devip->tape_density = TAPE_DEF_DENSITY;
- devip->tape_partition = 0;
- devip->tape_dce = 0;
- for (i = 0; i < TAPE_MAX_PARTITIONS; i++)
- devip->tape_location[i] = 0;
- devip->tape_pending_nbr_partitions = -1;
- /* Don't reset partitioning? */
- }
+ devip->tape_blksize = TAPE_DEF_BLKSIZE;
+ devip->tape_density = TAPE_DEF_DENSITY;
+ devip->tape_partition = 0;
+ devip->tape_dce = 0;
+ for (i = 0; i < TAPE_MAX_PARTITIONS; i++)
+ devip->tape_location[i] = 0;
+ devip->tape_pending_nbr_partitions = -1;
+ /* Don't reset partitioning? */
}
static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
@@ -6862,7 +6906,8 @@ static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
scsi_debug_stop_all_queued(sdp);
if (devip) {
set_bit(SDEBUG_UA_POR, devip->uas_bm);
- scsi_tape_reset_clear(devip);
+ if (SCpnt->device->type == TYPE_TAPE)
+ scsi_tape_reset_clear(devip);
}
if (sdebug_fail_lun_reset(SCpnt)) {
@@ -6901,7 +6946,8 @@ static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
if (devip->target == sdp->id) {
set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
- scsi_tape_reset_clear(devip);
+ if (SCpnt->device->type == TYPE_TAPE)
+ scsi_tape_reset_clear(devip);
++k;
}
}
@@ -6933,7 +6979,8 @@ static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
- scsi_tape_reset_clear(devip);
+ if (SCpnt->device->type == TYPE_TAPE)
+ scsi_tape_reset_clear(devip);
++k;
}
@@ -6957,7 +7004,8 @@ static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
list_for_each_entry(devip, &sdbg_host->dev_info_list,
dev_list) {
set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
- scsi_tape_reset_clear(devip);
+ if (SCpnt->device->type == TYPE_TAPE)
+ scsi_tape_reset_clear(devip);
++k;
}
}
@@ -9173,6 +9221,7 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
u32 flags;
u16 sa;
u8 opcode = cmd[0];
+ u32 devsel = sdebug_get_devsel(scp->device);
bool has_wlun_rl;
bool inject_now;
int ret = 0;
@@ -9252,12 +9301,14 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
else
sa = get_unaligned_be16(cmd + 8);
for (k = 0; k <= na; oip = r_oip->arrp + k++) {
- if (opcode == oip->opcode && sa == oip->sa)
+ if (opcode == oip->opcode && sa == oip->sa &&
+ (devsel & oip->devsel) != 0)
break;
}
} else { /* since no service action only check opcode */
for (k = 0; k <= na; oip = r_oip->arrp + k++) {
- if (opcode == oip->opcode)
+ if (opcode == oip->opcode &&
+ (devsel & oip->devsel) != 0)
break;
}
}
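
For orientation only (this is not part of the patch), the devsel gating added to scsi_debug above reduces to a per-opcode device-type bitmask: each opcode_info_t entry now carries a devsel mask, and a command definition is considered only when the issuing device's type bit is set in it. The standalone C sketch below models that check with a hypothetical harness; the DS_* values mirror the patch (TYPE_DISK is 0 and TYPE_TAPE is 1 in <scsi/scsi_proto.h>), everything else is illustrative.

/* Standalone sketch of the devsel check; the harness is hypothetical. */
#include <stdio.h>

#define DS_ALL    0xffffffffu
#define DS_SBC    (1u << 0)            /* TYPE_DISK */
#define DS_SSC    (1u << 1)            /* TYPE_TAPE */
#define DS_NO_SSC (DS_ALL & ~DS_SSC)

struct op_sketch {
	unsigned char opcode;
	unsigned int devsel;
	const char *handler;
};

static const struct op_sketch ops[] = {
	{ 0x08, DS_NO_SSC, "resp_read_dt0  (READ(6) disk)" },
	{ 0x08, DS_SSC,    "resp_read_tape (READ(6) tape)" },
	{ 0x19, DS_SSC,    "resp_erase     (ERASE(6))" },
};

int main(void)
{
	unsigned int devsel = DS_SSC;	/* pretend the LU is a tape drive */
	unsigned int i;

	for (i = 0; i < sizeof(ops) / sizeof(ops[0]); i++) {
		/* Same test the patch adds: (devsel & oip->devsel) != 0 */
		if ((devsel & ops[i].devsel) != 0)
			printf("opcode 0x%02x -> %s\n", ops[i].opcode, ops[i].handler);
	}
	return 0;
}
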
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 90f1393a23f8..a348df895dca 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -486,33 +486,6 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
}
/**
- * scsi_dev_info_list_del_keyed - remove one dev_info list entry.
- * @vendor: vendor string
- * @model: model (product) string
- * @key: specify list to use
- *
- * Description:
- * Remove and destroy one dev_info entry for @vendor, @model
- * in list specified by @key.
- *
- * Returns: 0 OK, -error on failure.
- **/
-int scsi_dev_info_list_del_keyed(char *vendor, char *model,
- enum scsi_devinfo_key key)
-{
- struct scsi_dev_info_list *found;
-
- found = scsi_dev_info_list_find(vendor, model, key);
- if (IS_ERR(found))
- return PTR_ERR(found);
-
- list_del(&found->dev_info_list);
- kfree(found);
- return 0;
-}
-EXPORT_SYMBOL(scsi_dev_info_list_del_keyed);
-
-/**
* scsi_dev_info_list_add_str - parse dev_list and add to the scsi_dev_info_list.
* @dev_list: string of device flags to add
*
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 9fc397a9ce7a..5b2b19f5e8ec 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -79,8 +79,6 @@ extern int scsi_dev_info_list_add_keyed(int compatible, char *vendor,
char *model, char *strflags,
blist_flags_t flags,
enum scsi_devinfo_key key);
-extern int scsi_dev_info_list_del_keyed(char *vendor, char *model,
- enum scsi_devinfo_key key);
extern int scsi_dev_info_add_list(enum scsi_devinfo_key key, const char *name);
extern int scsi_dev_info_remove_list(enum scsi_devinfo_key key);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 082f76e76721..6b165a3ec6de 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3509,7 +3509,7 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
* state as the LLDD would not have had an rport
* reference to pass us.
*
- * Take no action on the del_timer failure as the state
+ * Take no action on the timer_delete() failure as the state
* machine state change will validate the
* transaction.
*/
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 950d8c9fb884..3f6e87705b62 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -3215,7 +3215,7 @@ static bool sd_is_perm_stream(struct scsi_disk *sdkp, unsigned int stream_id)
return false;
if (get_unaligned_be32(&buf.h.len) < sizeof(struct scsi_stream_status))
return false;
- return buf.h.stream_status[0].perm;
+ return buf.s.perm;
}
static void sd_read_io_hints(struct scsi_disk *sdkp, unsigned char *buffer)
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index effb7e768165..3c02a5f7b5f3 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1658,8 +1658,7 @@ static void register_sg_sysctls(void)
static void unregister_sg_sysctls(void)
{
- if (hdr)
- unregister_sysctl_table(hdr);
+ unregister_sysctl_table(hdr);
}
#else
#define register_sg_sysctls() do { } while (0)
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 8a26eca4fdc9..1d784ee7671c 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -33,11 +33,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "2.1.30-031"
+#define DRIVER_VERSION "2.1.34-035"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 1
-#define DRIVER_RELEASE 30
-#define DRIVER_REVISION 31
+#define DRIVER_RELEASE 34
+#define DRIVER_REVISION 35
#define DRIVER_NAME "Microchip SmartPQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -68,6 +68,7 @@ static struct pqi_cmd_priv *pqi_cmd_priv(struct scsi_cmnd *cmd)
static void pqi_verify_structures(void);
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason);
+static void pqi_take_ctrl_devices_offline(struct pqi_ctrl_info *ctrl_info);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
@@ -2011,18 +2012,31 @@ static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
PQI_DEV_INFO_BUFFER_LENGTH - count,
"-:-");
- if (pqi_is_logical_device(device))
+ if (pqi_is_logical_device(device)) {
count += scnprintf(buffer + count,
PQI_DEV_INFO_BUFFER_LENGTH - count,
" %08x%08x",
*((u32 *)&device->scsi3addr),
*((u32 *)&device->scsi3addr[4]));
- else
+ } else if (ctrl_info->rpl_extended_format_4_5_supported) {
+ if (device->device_type == SA_DEVICE_TYPE_NVME)
+ count += scnprintf(buffer + count,
+ PQI_DEV_INFO_BUFFER_LENGTH - count,
+ " %016llx%016llx",
+ get_unaligned_be64(&device->wwid[0]),
+ get_unaligned_be64(&device->wwid[8]));
+ else
+ count += scnprintf(buffer + count,
+ PQI_DEV_INFO_BUFFER_LENGTH - count,
+ " %016llx",
+ get_unaligned_be64(&device->wwid[0]));
+ } else {
count += scnprintf(buffer + count,
PQI_DEV_INFO_BUFFER_LENGTH - count,
- " %016llx%016llx",
- get_unaligned_be64(&device->wwid[0]),
- get_unaligned_be64(&device->wwid[8]));
+ " %016llx",
+ get_unaligned_be64(&device->wwid[0]));
+ }
+
count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
" %s %.8s %.16s ",
@@ -5990,7 +6004,7 @@ static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
pqi_stream_data->next_lba = rmd.first_block +
rmd.block_cnt;
pqi_stream_data->last_accessed = jiffies;
- per_cpu_ptr(device->raid_io_stats, smp_processor_id())->write_stream_cnt++;
+ per_cpu_ptr(device->raid_io_stats, raw_smp_processor_id())->write_stream_cnt++;
return true;
}
@@ -6069,7 +6083,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
raid_bypassed = true;
- per_cpu_ptr(device->raid_io_stats, smp_processor_id())->raid_bypass_cnt++;
+ per_cpu_ptr(device->raid_io_stats, raw_smp_processor_id())->raid_bypass_cnt++;
}
}
if (!raid_bypassed)
@@ -9129,6 +9143,7 @@ static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
pqi_ctrl_wait_until_quiesced(ctrl_info);
pqi_fail_all_outstanding_requests(ctrl_info);
pqi_ctrl_unblock_requests(ctrl_info);
+ pqi_take_ctrl_devices_offline(ctrl_info);
}
static void pqi_ctrl_offline_worker(struct work_struct *work)
@@ -9203,6 +9218,27 @@ static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
schedule_work(&ctrl_info->ctrl_offline_work);
}
+static void pqi_take_ctrl_devices_offline(struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+ unsigned long flags;
+ struct pqi_scsi_dev *device;
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+ list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
+ rc = list_is_last(&device->scsi_device_list_entry, &ctrl_info->scsi_device_list);
+ if (rc)
+ continue;
+
+ /*
+ * Only offline devices that actually have an attached sdev.
+ */
+ if (device->sdev)
+ scsi_device_set_state(device->sdev, SDEV_OFFLINE);
+ }
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+}
+
static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
const struct pci_device_id *id)
{
@@ -9711,6 +9747,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x00a3)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1ff9, 0x00a1)
},
{
@@ -10047,6 +10087,30 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x207d, 0x4044)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x207d, 0x4054)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x207d, 0x4084)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x207d, 0x4094)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x207d, 0x4140)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x207d, 0x4240)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADVANTECH, 0x8312)
},
{
@@ -10263,6 +10327,14 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1018, 0x8238)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f3f, 0x0610)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_LENOVO, 0x0220)
},
{
@@ -10271,10 +10343,30 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0222)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0223)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0224)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0225)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_LENOVO, 0x0520)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0521)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_LENOVO, 0x0522)
},
{
@@ -10295,6 +10387,26 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0624)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0625)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0626)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0627)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0628)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1014, 0x0718)
},
{
@@ -10323,6 +10435,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ded, 0x3301)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1ff9, 0x0045)
},
{
@@ -10471,6 +10587,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x100b)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1f51, 0x100e)
},
{
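
As a side note (again not part of the patch), the reworked identifier printing in pqi_dev_info() follows a three-way decision: logical devices keep the 8-byte SCSI-3 address, NVMe devices behind a controller that reports extended RPL format 4/5 get the full 16-byte WWID, and all other physical devices fall back to the first 8 bytes of the WWID. The sketch below restates that branch with simplified stand-in types; the struct and the sample values are illustrative only.

/* Simplified stand-in for the pqi_dev_info() identifier branch above. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct dev_sketch {
	bool     is_logical;
	bool     is_nvme;
	uint32_t scsi3addr[2];
	uint64_t wwid[2];
};

static void print_id(const struct dev_sketch *d, bool rpl_ext_4_5_supported)
{
	if (d->is_logical)
		printf("%08x%08x\n", d->scsi3addr[0], d->scsi3addr[1]);
	else if (rpl_ext_4_5_supported && d->is_nvme)
		printf("%016llx%016llx\n",
		       (unsigned long long)d->wwid[0],
		       (unsigned long long)d->wwid[1]);
	else
		printf("%016llx\n", (unsigned long long)d->wwid[0]);
}

int main(void)
{
	struct dev_sketch nvme = {
		.is_nvme = true,
		.wwid = { 0x1122334455667788ULL, 0x99aabbccddeeff00ULL },
	};

	print_id(&nvme, true);	/* both 64-bit halves */
	print_id(&nvme, false);	/* only the first half */
	return 0;
}
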
diff --git a/drivers/soc/qcom/ice.c b/drivers/soc/qcom/ice.c
index 2310afa77b76..c467b55b4174 100644
--- a/drivers/soc/qcom/ice.c
+++ b/drivers/soc/qcom/ice.c
@@ -21,20 +21,63 @@
#include <soc/qcom/ice.h>
-#define AES_256_XTS_KEY_SIZE 64
+#define AES_256_XTS_KEY_SIZE 64 /* for raw keys only */
+#define QCOM_ICE_HWKM_WRAPPED_KEY_SIZE 100 /* assuming HWKM v2 */
/* QCOM ICE registers */
+
+#define QCOM_ICE_REG_CONTROL 0x0000
+#define QCOM_ICE_LEGACY_MODE_ENABLED BIT(0)
+
#define QCOM_ICE_REG_VERSION 0x0008
+
#define QCOM_ICE_REG_FUSE_SETTING 0x0010
+#define QCOM_ICE_FUSE_SETTING_MASK BIT(0)
+#define QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK BIT(1)
+#define QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK BIT(2)
+
#define QCOM_ICE_REG_BIST_STATUS 0x0070
+#define QCOM_ICE_BIST_STATUS_MASK GENMASK(31, 28)
+
#define QCOM_ICE_REG_ADVANCED_CONTROL 0x1000
-/* BIST ("built-in self-test") status flags */
-#define QCOM_ICE_BIST_STATUS_MASK GENMASK(31, 28)
+#define QCOM_ICE_REG_CRYPTOCFG_BASE 0x4040
+#define QCOM_ICE_REG_CRYPTOCFG_SIZE 0x80
+#define QCOM_ICE_REG_CRYPTOCFG(slot) (QCOM_ICE_REG_CRYPTOCFG_BASE + \
+ QCOM_ICE_REG_CRYPTOCFG_SIZE * (slot))
+union crypto_cfg {
+ __le32 regval;
+ struct {
+ u8 dusize;
+ u8 capidx;
+ u8 reserved;
+#define QCOM_ICE_HWKM_CFG_ENABLE_VAL BIT(7)
+ u8 cfge;
+ };
+};
-#define QCOM_ICE_FUSE_SETTING_MASK 0x1
-#define QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK 0x2
-#define QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK 0x4
+/* QCOM ICE HWKM (Hardware Key Manager) registers */
+
+#define HWKM_OFFSET 0x8000
+
+#define QCOM_ICE_REG_HWKM_TZ_KM_CTL (HWKM_OFFSET + 0x1000)
+#define QCOM_ICE_HWKM_DISABLE_CRC_CHECKS_VAL (BIT(1) | BIT(2))
+
+#define QCOM_ICE_REG_HWKM_TZ_KM_STATUS (HWKM_OFFSET + 0x1004)
+#define QCOM_ICE_HWKM_KT_CLEAR_DONE BIT(0)
+#define QCOM_ICE_HWKM_BOOT_CMD_LIST0_DONE BIT(1)
+#define QCOM_ICE_HWKM_BOOT_CMD_LIST1_DONE BIT(2)
+#define QCOM_ICE_HWKM_CRYPTO_BIST_DONE_V2 BIT(7)
+#define QCOM_ICE_HWKM_BIST_DONE_V2 BIT(9)
+
+#define QCOM_ICE_REG_HWKM_BANK0_BANKN_IRQ_STATUS (HWKM_OFFSET + 0x2008)
+#define QCOM_ICE_HWKM_RSP_FIFO_CLEAR_VAL BIT(3)
+
+#define QCOM_ICE_REG_HWKM_BANK0_BBAC_0 (HWKM_OFFSET + 0x5000)
+#define QCOM_ICE_REG_HWKM_BANK0_BBAC_1 (HWKM_OFFSET + 0x5004)
+#define QCOM_ICE_REG_HWKM_BANK0_BBAC_2 (HWKM_OFFSET + 0x5008)
+#define QCOM_ICE_REG_HWKM_BANK0_BBAC_3 (HWKM_OFFSET + 0x500C)
+#define QCOM_ICE_REG_HWKM_BANK0_BBAC_4 (HWKM_OFFSET + 0x5010)
#define qcom_ice_writel(engine, val, reg) \
writel((val), (engine)->base + (reg))
@@ -42,11 +85,18 @@
#define qcom_ice_readl(engine, reg) \
readl((engine)->base + (reg))
+static bool qcom_ice_use_wrapped_keys;
+module_param_named(use_wrapped_keys, qcom_ice_use_wrapped_keys, bool, 0660);
+MODULE_PARM_DESC(use_wrapped_keys,
+ "Support wrapped keys instead of raw keys, if available on the platform");
+
struct qcom_ice {
struct device *dev;
void __iomem *base;
struct clk *core_clk;
+ bool use_hwkm;
+ bool hwkm_init_complete;
};
static bool qcom_ice_check_supported(struct qcom_ice *ice)
@@ -76,6 +126,35 @@ static bool qcom_ice_check_supported(struct qcom_ice *ice)
return false;
}
+ /*
+ * Check for HWKM support and decide whether to use it or not. ICE
+ * v3.2.1 and later have HWKM v2. ICE v3.2.0 has HWKM v1. Earlier ICE
+ * versions don't have HWKM at all. However, for HWKM to be fully
+ * usable by Linux, the TrustZone software also needs to support certain
+ * SCM calls including the ones to generate and prepare keys. That
+ * effectively makes the earliest supported SoC be SM8650, which has
+ * HWKM v2. Therefore, this driver doesn't include support for HWKM v1,
+ * and it checks for the SCM call support before it decides to use HWKM.
+ *
+ * Also, since HWKM and legacy mode are mutually exclusive, and
+ * ICE-capable storage driver(s) need to know early on whether to
+ * advertise support for raw keys or wrapped keys, HWKM cannot be used
+ * unconditionally. A module parameter is used to opt into using it.
+ */
+ if ((major >= 4 ||
+ (major == 3 && (minor >= 3 || (minor == 2 && step >= 1)))) &&
+ qcom_scm_has_wrapped_key_support()) {
+ if (qcom_ice_use_wrapped_keys) {
+ dev_info(dev, "Using HWKM. Supporting wrapped keys only.\n");
+ ice->use_hwkm = true;
+ } else {
+ dev_info(dev, "Not using HWKM. Supporting raw keys only.\n");
+ }
+ } else if (qcom_ice_use_wrapped_keys) {
+ dev_warn(dev, "A supported HWKM is not present. Ignoring qcom_ice.use_wrapped_keys=1.\n");
+ } else {
+ dev_info(dev, "A supported HWKM is not present. Supporting raw keys only.\n");
+ }
return true;
}
@@ -123,17 +202,71 @@ static int qcom_ice_wait_bist_status(struct qcom_ice *ice)
err = readl_poll_timeout(ice->base + QCOM_ICE_REG_BIST_STATUS,
regval, !(regval & QCOM_ICE_BIST_STATUS_MASK),
50, 5000);
- if (err)
+ if (err) {
dev_err(ice->dev, "Timed out waiting for ICE self-test to complete\n");
+ return err;
+ }
- return err;
+ if (ice->use_hwkm &&
+ qcom_ice_readl(ice, QCOM_ICE_REG_HWKM_TZ_KM_STATUS) !=
+ (QCOM_ICE_HWKM_KT_CLEAR_DONE |
+ QCOM_ICE_HWKM_BOOT_CMD_LIST0_DONE |
+ QCOM_ICE_HWKM_BOOT_CMD_LIST1_DONE |
+ QCOM_ICE_HWKM_CRYPTO_BIST_DONE_V2 |
+ QCOM_ICE_HWKM_BIST_DONE_V2)) {
+ dev_err(ice->dev, "HWKM self-test error!\n");
+ /*
+ * Too late to revoke use_hwkm here, as it was already
+ * propagated up the stack into the crypto capabilities.
+ */
+ }
+ return 0;
+}
+
+static void qcom_ice_hwkm_init(struct qcom_ice *ice)
+{
+ u32 regval;
+
+ if (!ice->use_hwkm)
+ return;
+
+ BUILD_BUG_ON(QCOM_ICE_HWKM_WRAPPED_KEY_SIZE >
+ BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE);
+ /*
+ * When ICE is in HWKM mode, it only supports wrapped keys.
+ * When ICE is in legacy mode, it only supports raw keys.
+ *
+ * Put ICE in HWKM mode. ICE defaults to legacy mode.
+ */
+ regval = qcom_ice_readl(ice, QCOM_ICE_REG_CONTROL);
+ regval &= ~QCOM_ICE_LEGACY_MODE_ENABLED;
+ qcom_ice_writel(ice, regval, QCOM_ICE_REG_CONTROL);
+
+ /* Disable CRC checks. This HWKM feature is not used. */
+ qcom_ice_writel(ice, QCOM_ICE_HWKM_DISABLE_CRC_CHECKS_VAL,
+ QCOM_ICE_REG_HWKM_TZ_KM_CTL);
+
+ /*
+ * Allow the HWKM slave to read and write the keyslots in the ICE HWKM
+ * slave. Without this, TrustZone cannot program keys into ICE.
+ */
+ qcom_ice_writel(ice, GENMASK(31, 0), QCOM_ICE_REG_HWKM_BANK0_BBAC_0);
+ qcom_ice_writel(ice, GENMASK(31, 0), QCOM_ICE_REG_HWKM_BANK0_BBAC_1);
+ qcom_ice_writel(ice, GENMASK(31, 0), QCOM_ICE_REG_HWKM_BANK0_BBAC_2);
+ qcom_ice_writel(ice, GENMASK(31, 0), QCOM_ICE_REG_HWKM_BANK0_BBAC_3);
+ qcom_ice_writel(ice, GENMASK(31, 0), QCOM_ICE_REG_HWKM_BANK0_BBAC_4);
+
+ /* Clear the HWKM response FIFO. */
+ qcom_ice_writel(ice, QCOM_ICE_HWKM_RSP_FIFO_CLEAR_VAL,
+ QCOM_ICE_REG_HWKM_BANK0_BANKN_IRQ_STATUS);
+ ice->hwkm_init_complete = true;
}
int qcom_ice_enable(struct qcom_ice *ice)
{
qcom_ice_low_power_mode_enable(ice);
qcom_ice_optimization_enable(ice);
-
+ qcom_ice_hwkm_init(ice);
return qcom_ice_wait_bist_status(ice);
}
EXPORT_SYMBOL_GPL(qcom_ice_enable);
@@ -149,7 +282,7 @@ int qcom_ice_resume(struct qcom_ice *ice)
err);
return err;
}
-
+ qcom_ice_hwkm_init(ice);
return qcom_ice_wait_bist_status(ice);
}
EXPORT_SYMBOL_GPL(qcom_ice_resume);
@@ -157,15 +290,58 @@ EXPORT_SYMBOL_GPL(qcom_ice_resume);
int qcom_ice_suspend(struct qcom_ice *ice)
{
clk_disable_unprepare(ice->core_clk);
+ ice->hwkm_init_complete = false;
return 0;
}
EXPORT_SYMBOL_GPL(qcom_ice_suspend);
-int qcom_ice_program_key(struct qcom_ice *ice,
- u8 algorithm_id, u8 key_size,
- const u8 crypto_key[], u8 data_unit_size,
- int slot)
+static unsigned int translate_hwkm_slot(struct qcom_ice *ice, unsigned int slot)
+{
+ return slot * 2;
+}
+
+static int qcom_ice_program_wrapped_key(struct qcom_ice *ice, unsigned int slot,
+ const struct blk_crypto_key *bkey)
+{
+ struct device *dev = ice->dev;
+ union crypto_cfg cfg = {
+ .dusize = bkey->crypto_cfg.data_unit_size / 512,
+ .capidx = QCOM_SCM_ICE_CIPHER_AES_256_XTS,
+ .cfge = QCOM_ICE_HWKM_CFG_ENABLE_VAL,
+ };
+ int err;
+
+ if (!ice->use_hwkm) {
+ dev_err_ratelimited(dev, "Got wrapped key when not using HWKM\n");
+ return -EINVAL;
+ }
+ if (!ice->hwkm_init_complete) {
+ dev_err_ratelimited(dev, "HWKM not yet initialized\n");
+ return -EINVAL;
+ }
+
+ /* Clear CFGE before programming the key. */
+ qcom_ice_writel(ice, 0x0, QCOM_ICE_REG_CRYPTOCFG(slot));
+
+ /* Call into TrustZone to program the wrapped key using HWKM. */
+ err = qcom_scm_ice_set_key(translate_hwkm_slot(ice, slot), bkey->bytes,
+ bkey->size, cfg.capidx, cfg.dusize);
+ if (err) {
+ dev_err_ratelimited(dev,
+ "qcom_scm_ice_set_key failed; err=%d, slot=%u\n",
+ err, slot);
+ return err;
+ }
+
+ /* Set CFGE after programming the key. */
+ qcom_ice_writel(ice, le32_to_cpu(cfg.regval),
+ QCOM_ICE_REG_CRYPTOCFG(slot));
+ return 0;
+}
+
+int qcom_ice_program_key(struct qcom_ice *ice, unsigned int slot,
+ const struct blk_crypto_key *blk_key)
{
struct device *dev = ice->dev;
union {
@@ -176,15 +352,26 @@ int qcom_ice_program_key(struct qcom_ice *ice,
int err;
/* Only AES-256-XTS has been tested so far. */
- if (algorithm_id != QCOM_ICE_CRYPTO_ALG_AES_XTS ||
- key_size != QCOM_ICE_CRYPTO_KEY_SIZE_256) {
- dev_err_ratelimited(dev,
- "Unhandled crypto capability; algorithm_id=%d, key_size=%d\n",
- algorithm_id, key_size);
+ if (blk_key->crypto_cfg.crypto_mode !=
+ BLK_ENCRYPTION_MODE_AES_256_XTS) {
+ dev_err_ratelimited(dev, "Unsupported crypto mode: %d\n",
+ blk_key->crypto_cfg.crypto_mode);
+ return -EINVAL;
+ }
+
+ if (blk_key->crypto_cfg.key_type == BLK_CRYPTO_KEY_TYPE_HW_WRAPPED)
+ return qcom_ice_program_wrapped_key(ice, slot, blk_key);
+
+ if (ice->use_hwkm) {
+ dev_err_ratelimited(dev, "Got raw key when using HWKM\n");
return -EINVAL;
}
- memcpy(key.bytes, crypto_key, AES_256_XTS_KEY_SIZE);
+ if (blk_key->size != AES_256_XTS_KEY_SIZE) {
+ dev_err_ratelimited(dev, "Incorrect key size\n");
+ return -EINVAL;
+ }
+ memcpy(key.bytes, blk_key->bytes, AES_256_XTS_KEY_SIZE);
/* The SCM call requires that the key words are encoded in big endian */
for (i = 0; i < ARRAY_SIZE(key.words); i++)
@@ -192,7 +379,7 @@ int qcom_ice_program_key(struct qcom_ice *ice,
err = qcom_scm_ice_set_key(slot, key.bytes, AES_256_XTS_KEY_SIZE,
QCOM_SCM_ICE_CIPHER_AES_256_XTS,
- data_unit_size);
+ blk_key->crypto_cfg.data_unit_size / 512);
memzero_explicit(&key, sizeof(key));
@@ -202,10 +389,131 @@ EXPORT_SYMBOL_GPL(qcom_ice_program_key);
int qcom_ice_evict_key(struct qcom_ice *ice, int slot)
{
+ if (ice->hwkm_init_complete)
+ slot = translate_hwkm_slot(ice, slot);
return qcom_scm_ice_invalidate_key(slot);
}
EXPORT_SYMBOL_GPL(qcom_ice_evict_key);
+/**
+ * qcom_ice_get_supported_key_type() - Get the supported key type
+ * @ice: ICE driver data
+ *
+ * Return: the blk-crypto key type that the ICE driver is configured to use.
+ * This is the key type that ICE-capable storage drivers should advertise as
+ * supported in the crypto capabilities of any disks they register.
+ */
+enum blk_crypto_key_type qcom_ice_get_supported_key_type(struct qcom_ice *ice)
+{
+ if (ice->use_hwkm)
+ return BLK_CRYPTO_KEY_TYPE_HW_WRAPPED;
+ return BLK_CRYPTO_KEY_TYPE_RAW;
+}
+EXPORT_SYMBOL_GPL(qcom_ice_get_supported_key_type);
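A hedged illustration of the intended use: an ICE-capable storage driver copies this value into the crypto capabilities it registers. The profile field name below is assumed from the blk-crypto-profile interface and is not part of this diff.

/* Hypothetical sketch only; field and function names are assumptions. */
static void host_init_crypto_capabilities(struct blk_crypto_profile *profile,
					  struct qcom_ice *ice)
{
	profile->key_types_supported = qcom_ice_get_supported_key_type(ice);
}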
+
+/**
+ * qcom_ice_derive_sw_secret() - Derive software secret from wrapped key
+ * @ice: ICE driver data
+ * @eph_key: an ephemerally-wrapped key
+ * @eph_key_size: size of @eph_key in bytes
+ * @sw_secret: output buffer for the software secret
+ *
+ * Use HWKM to derive the "software secret" from a hardware-wrapped key that is
+ * given in ephemerally-wrapped form.
+ *
+ * Return: 0 on success; -EBADMSG if the given ephemerally-wrapped key is
+ * invalid; or another -errno value.
+ */
+int qcom_ice_derive_sw_secret(struct qcom_ice *ice,
+ const u8 *eph_key, size_t eph_key_size,
+ u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE])
+{
+ int err = qcom_scm_derive_sw_secret(eph_key, eph_key_size,
+ sw_secret,
+ BLK_CRYPTO_SW_SECRET_SIZE);
+ if (err == -EIO || err == -EINVAL)
+ err = -EBADMSG; /* probably invalid key */
+ return err;
+}
+EXPORT_SYMBOL_GPL(qcom_ice_derive_sw_secret);
+
+/**
+ * qcom_ice_generate_key() - Generate a wrapped key for inline encryption
+ * @ice: ICE driver data
+ * @lt_key: output buffer for the long-term wrapped key
+ *
+ * Use HWKM to generate a new key and return it as a long-term wrapped key.
+ *
+ * Return: the size of the resulting wrapped key on success; -errno on failure.
+ */
+int qcom_ice_generate_key(struct qcom_ice *ice,
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+ int err;
+
+ err = qcom_scm_generate_ice_key(lt_key, QCOM_ICE_HWKM_WRAPPED_KEY_SIZE);
+ if (err)
+ return err;
+
+ return QCOM_ICE_HWKM_WRAPPED_KEY_SIZE;
+}
+EXPORT_SYMBOL_GPL(qcom_ice_generate_key);
+
+/**
+ * qcom_ice_prepare_key() - Prepare a wrapped key for inline encryption
+ * @ice: ICE driver data
+ * @lt_key: a long-term wrapped key
+ * @lt_key_size: size of @lt_key in bytes
+ * @eph_key: output buffer for the ephemerally-wrapped key
+ *
+ * Use HWKM to re-wrap a long-term wrapped key with the per-boot ephemeral key.
+ *
+ * Return: the size of the resulting wrapped key on success; -EBADMSG if the
+ * given long-term wrapped key is invalid; or another -errno value.
+ */
+int qcom_ice_prepare_key(struct qcom_ice *ice,
+ const u8 *lt_key, size_t lt_key_size,
+ u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+ int err;
+
+ err = qcom_scm_prepare_ice_key(lt_key, lt_key_size,
+ eph_key, QCOM_ICE_HWKM_WRAPPED_KEY_SIZE);
+ if (err == -EIO || err == -EINVAL)
+ err = -EBADMSG; /* probably invalid key */
+ if (err)
+ return err;
+
+ return QCOM_ICE_HWKM_WRAPPED_KEY_SIZE;
+}
+EXPORT_SYMBOL_GPL(qcom_ice_prepare_key);
+
+/**
+ * qcom_ice_import_key() - Import a raw key for inline encryption
+ * @ice: ICE driver data
+ * @raw_key: the raw key to import
+ * @raw_key_size: size of @raw_key in bytes
+ * @lt_key: output buffer for the long-term wrapped key
+ *
+ * Use HWKM to import a raw key and return it as a long-term wrapped key.
+ *
+ * Return: the size of the resulting wrapped key on success; -errno on failure.
+ */
+int qcom_ice_import_key(struct qcom_ice *ice,
+ const u8 *raw_key, size_t raw_key_size,
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+ int err;
+
+ err = qcom_scm_import_ice_key(raw_key, raw_key_size,
+ lt_key, QCOM_ICE_HWKM_WRAPPED_KEY_SIZE);
+ if (err)
+ return err;
+
+ return QCOM_ICE_HWKM_WRAPPED_KEY_SIZE;
+}
+EXPORT_SYMBOL_GPL(qcom_ice_import_key);
+
static struct qcom_ice *qcom_ice_create(struct device *dev,
void __iomem *base)
{
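Taken together, qcom_ice_derive_sw_secret(), qcom_ice_generate_key(), qcom_ice_prepare_key() and qcom_ice_import_key() map onto the blk-crypto wrapped-key hooks of a storage host driver. A hypothetical sketch of that wiring follows; the profile_to_ice() helper and the exact hook signatures are assumptions, not shown in this diff.

/* Hypothetical sketch only; not part of this patch. */
static int host_ice_generate_key(struct blk_crypto_profile *profile,
				 u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
{
	/* profile_to_ice() is an assumed helper returning the ICE handle. */
	return qcom_ice_generate_key(profile_to_ice(profile), lt_key);
}

static int host_ice_prepare_key(struct blk_crypto_profile *profile,
				const u8 *lt_key, size_t lt_key_size,
				u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
{
	return qcom_ice_prepare_key(profile_to_ice(profile),
				    lt_key, lt_key_size, eph_key);
}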
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 66804bf1ee32..0904ecae253a 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -673,12 +673,10 @@ static ssize_t emulate_model_alias_store(struct config_item *item,
return ret;
BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1);
- if (flag) {
+ if (flag)
dev_set_t10_wwn_model_alias(dev);
- } else {
- strscpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
- sizeof(dev->t10_wwn.model));
- }
+ else
+ strscpy(dev->t10_wwn.model, dev->transport->inquiry_prod);
da->emulate_model_alias = flag;
return count;
}
@@ -1433,7 +1431,7 @@ static ssize_t target_wwn_vendor_id_store(struct config_item *item,
ssize_t len;
ssize_t ret;
- len = strscpy(buf, page, sizeof(buf));
+ len = strscpy(buf, page);
if (len > 0) {
/* Strip any newline added from userspace. */
stripped = strstrip(buf);
@@ -1464,7 +1462,7 @@ static ssize_t target_wwn_vendor_id_store(struct config_item *item,
}
BUILD_BUG_ON(sizeof(dev->t10_wwn.vendor) != INQUIRY_VENDOR_LEN + 1);
- strscpy(dev->t10_wwn.vendor, stripped, sizeof(dev->t10_wwn.vendor));
+ strscpy(dev->t10_wwn.vendor, stripped);
pr_debug("Target_Core_ConfigFS: Set emulated T10 Vendor Identification:"
" %s\n", dev->t10_wwn.vendor);
@@ -1489,7 +1487,7 @@ static ssize_t target_wwn_product_id_store(struct config_item *item,
ssize_t len;
ssize_t ret;
- len = strscpy(buf, page, sizeof(buf));
+ len = strscpy(buf, page);
if (len > 0) {
/* Strip any newline added from userspace. */
stripped = strstrip(buf);
@@ -1520,7 +1518,7 @@ static ssize_t target_wwn_product_id_store(struct config_item *item,
}
BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1);
- strscpy(dev->t10_wwn.model, stripped, sizeof(dev->t10_wwn.model));
+ strscpy(dev->t10_wwn.model, stripped);
pr_debug("Target_Core_ConfigFS: Set emulated T10 Model Identification: %s\n",
dev->t10_wwn.model);
@@ -1545,7 +1543,7 @@ static ssize_t target_wwn_revision_store(struct config_item *item,
ssize_t len;
ssize_t ret;
- len = strscpy(buf, page, sizeof(buf));
+ len = strscpy(buf, page);
if (len > 0) {
/* Strip any newline added from userspace. */
stripped = strstrip(buf);
@@ -1576,7 +1574,7 @@ static ssize_t target_wwn_revision_store(struct config_item *item,
}
BUILD_BUG_ON(sizeof(dev->t10_wwn.revision) != INQUIRY_REVISION_LEN + 1);
- strscpy(dev->t10_wwn.revision, stripped, sizeof(dev->t10_wwn.revision));
+ strscpy(dev->t10_wwn.revision, stripped);
pr_debug("Target_Core_ConfigFS: Set emulated T10 Revision: %s\n",
dev->t10_wwn.revision);
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index cc2da086f96e..7bb711b24c0d 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -55,14 +55,14 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd)
rcu_read_lock();
deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
if (deve) {
- atomic_long_inc(&deve->total_cmds);
+ this_cpu_inc(deve->stats->total_cmds);
if (se_cmd->data_direction == DMA_TO_DEVICE)
- atomic_long_add(se_cmd->data_length,
- &deve->write_bytes);
+ this_cpu_add(deve->stats->write_bytes,
+ se_cmd->data_length);
else if (se_cmd->data_direction == DMA_FROM_DEVICE)
- atomic_long_add(se_cmd->data_length,
- &deve->read_bytes);
+ this_cpu_add(deve->stats->read_bytes,
+ se_cmd->data_length);
if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
deve->lun_access_ro) {
@@ -126,14 +126,14 @@ out_unlock:
* target_core_fabric_configfs.c:target_fabric_port_release
*/
se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
- atomic_long_inc(&se_cmd->se_dev->num_cmds);
+ this_cpu_inc(se_cmd->se_dev->stats->total_cmds);
if (se_cmd->data_direction == DMA_TO_DEVICE)
- atomic_long_add(se_cmd->data_length,
- &se_cmd->se_dev->write_bytes);
+ this_cpu_add(se_cmd->se_dev->stats->write_bytes,
+ se_cmd->data_length);
else if (se_cmd->data_direction == DMA_FROM_DEVICE)
- atomic_long_add(se_cmd->data_length,
- &se_cmd->se_dev->read_bytes);
+ this_cpu_add(se_cmd->se_dev->stats->read_bytes,
+ se_cmd->data_length);
return ret;
}
@@ -322,6 +322,7 @@ int core_enable_device_list_for_node(
struct se_portal_group *tpg)
{
struct se_dev_entry *orig, *new;
+ int ret = 0;
new = kzalloc(sizeof(*new), GFP_KERNEL);
if (!new) {
@@ -329,6 +330,12 @@ int core_enable_device_list_for_node(
return -ENOMEM;
}
+ new->stats = alloc_percpu(struct se_dev_entry_io_stats);
+ if (!new->stats) {
+ ret = -ENOMEM;
+ goto free_deve;
+ }
+
spin_lock_init(&new->ua_lock);
INIT_LIST_HEAD(&new->ua_list);
INIT_LIST_HEAD(&new->lun_link);
@@ -351,8 +358,8 @@ int core_enable_device_list_for_node(
" for dynamic -> explicit NodeACL conversion:"
" %s\n", nacl->initiatorname);
mutex_unlock(&nacl->lun_entry_mutex);
- kfree(new);
- return -EINVAL;
+ ret = -EINVAL;
+ goto free_stats;
}
if (orig->se_lun_acl != NULL) {
pr_warn_ratelimited("Detected existing explicit"
@@ -360,8 +367,8 @@ int core_enable_device_list_for_node(
" mapped_lun: %llu, failing\n",
nacl->initiatorname, mapped_lun);
mutex_unlock(&nacl->lun_entry_mutex);
- kfree(new);
- return -EINVAL;
+ ret = -EINVAL;
+ goto free_stats;
}
new->se_lun = lun;
@@ -394,6 +401,20 @@ int core_enable_device_list_for_node(
target_luns_data_has_changed(nacl, new, true);
return 0;
+
+free_stats:
+ free_percpu(new->stats);
+free_deve:
+ kfree(new);
+ return ret;
+}
+
+static void target_free_dev_entry(struct rcu_head *head)
+{
+ struct se_dev_entry *deve = container_of(head, struct se_dev_entry,
+ rcu_head);
+ free_percpu(deve->stats);
+ kfree(deve);
}
void core_disable_device_list_for_node(
@@ -443,7 +464,7 @@ void core_disable_device_list_for_node(
kref_put(&orig->pr_kref, target_pr_kref_release);
wait_for_completion(&orig->pr_comp);
- kfree_rcu(orig, rcu_head);
+ call_rcu(&orig->rcu_head, target_free_dev_entry);
core_scsi3_free_pr_reg_from_nacl(dev, nacl);
target_luns_data_has_changed(nacl, NULL, false);
@@ -679,6 +700,18 @@ static void scsi_dump_inquiry(struct se_device *dev)
pr_debug(" Type: %s ", scsi_device_type(device_type));
}
+static void target_non_ordered_release(struct percpu_ref *ref)
+{
+ struct se_device *dev = container_of(ref, struct se_device,
+ non_ordered);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->delayed_cmd_lock, flags);
+ if (!list_empty(&dev->delayed_cmd_list))
+ schedule_work(&dev->delayed_cmd_work);
+ spin_unlock_irqrestore(&dev->delayed_cmd_lock, flags);
+}
+
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
struct se_device *dev;
@@ -689,11 +722,13 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
if (!dev)
return NULL;
+ dev->stats = alloc_percpu(struct se_dev_io_stats);
+ if (!dev->stats)
+ goto free_device;
+
dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL);
- if (!dev->queues) {
- hba->backend->ops->free_device(dev);
- return NULL;
- }
+ if (!dev->queues)
+ goto free_stats;
dev->queue_cnt = nr_cpu_ids;
for (i = 0; i < dev->queue_cnt; i++) {
@@ -707,6 +742,10 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
INIT_WORK(&q->sq.work, target_queued_submit_work);
}
+ if (percpu_ref_init(&dev->non_ordered, target_non_ordered_release,
+ PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
+ goto free_queues;
+
dev->se_hba = hba;
dev->transport = hba->backend->ops;
dev->transport_flags = dev->transport->transport_flags_default;
@@ -791,6 +830,14 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
sizeof(dev->t10_wwn.revision));
return dev;
+
+free_queues:
+ kfree(dev->queues);
+free_stats:
+ free_percpu(dev->stats);
+free_device:
+ hba->backend->ops->free_device(dev);
+ return NULL;
}
/*
@@ -980,6 +1027,9 @@ void target_free_device(struct se_device *dev)
WARN_ON(!list_empty(&dev->dev_sep_list));
+ percpu_ref_exit(&dev->non_ordered);
+ cancel_work_sync(&dev->delayed_cmd_work);
+
if (target_dev_configured(dev)) {
dev->transport->destroy_device(dev);
@@ -1001,6 +1051,7 @@ void target_free_device(struct se_device *dev)
dev->transport->free_prot(dev);
kfree(dev->queues);
+ free_percpu(dev->stats);
dev->transport->free_device(dev);
}
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 0a02492bef70..aad0096afa21 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -1325,7 +1325,7 @@ static void set_dpofua_usage_bits32(u8 *usage_bits, struct se_device *dev)
usage_bits[10] |= 0x18;
}
-static struct target_opcode_descriptor tcm_opcode_read6 = {
+static const struct target_opcode_descriptor tcm_opcode_read6 = {
.support = SCSI_SUPPORT_FULL,
.opcode = READ_6,
.cdb_size = 6,
@@ -1333,7 +1333,7 @@ static struct target_opcode_descriptor tcm_opcode_read6 = {
0xff, SCSI_CONTROL_MASK},
};
-static struct target_opcode_descriptor tcm_opcode_read10 = {
+static const struct target_opcode_descriptor tcm_opcode_read10 = {
.support = SCSI_SUPPORT_FULL,
.opcode = READ_10,
.cdb_size = 10,
@@ -1343,7 +1343,7 @@ static struct target_opcode_descriptor tcm_opcode_read10 = {
.update_usage_bits = set_dpofua_usage_bits,
};
-static struct target_opcode_descriptor tcm_opcode_read12 = {
+static const struct target_opcode_descriptor tcm_opcode_read12 = {
.support = SCSI_SUPPORT_FULL,
.opcode = READ_12,
.cdb_size = 12,
@@ -1353,7 +1353,7 @@ static struct target_opcode_descriptor tcm_opcode_read12 = {
.update_usage_bits = set_dpofua_usage_bits,
};
-static struct target_opcode_descriptor tcm_opcode_read16 = {
+static const struct target_opcode_descriptor tcm_opcode_read16 = {
.support = SCSI_SUPPORT_FULL,
.opcode = READ_16,
.cdb_size = 16,
@@ -1364,7 +1364,7 @@ static struct target_opcode_descriptor tcm_opcode_read16 = {
.update_usage_bits = set_dpofua_usage_bits,
};
-static struct target_opcode_descriptor tcm_opcode_write6 = {
+static const struct target_opcode_descriptor tcm_opcode_write6 = {
.support = SCSI_SUPPORT_FULL,
.opcode = WRITE_6,
.cdb_size = 6,
@@ -1372,7 +1372,7 @@ static struct target_opcode_descriptor tcm_opcode_write6 = {
0xff, SCSI_CONTROL_MASK},
};
-static struct target_opcode_descriptor tcm_opcode_write10 = {
+static const struct target_opcode_descriptor tcm_opcode_write10 = {
.support = SCSI_SUPPORT_FULL,
.opcode = WRITE_10,
.cdb_size = 10,
@@ -1382,7 +1382,7 @@ static struct target_opcode_descriptor tcm_opcode_write10 = {
.update_usage_bits = set_dpofua_usage_bits,
};
-static struct target_opcode_descriptor tcm_opcode_write_verify10 = {
+static const struct target_opcode_descriptor tcm_opcode_write_verify10 = {
.support = SCSI_SUPPORT_FULL,
.opcode = WRITE_VERIFY,
.cdb_size = 10,
@@ -1392,7 +1392,7 @@ static struct target_opcode_descriptor tcm_opcode_write_verify10 = {
.update_usage_bits = set_dpofua_usage_bits,
};
-static struct target_opcode_descriptor tcm_opcode_write12 = {
+static const struct target_opcode_descriptor tcm_opcode_write12 = {
.support = SCSI_SUPPORT_FULL,
.opcode = WRITE_12,
.cdb_size = 12,
@@ -1402,7 +1402,7 @@ static struct target_opcode_descriptor tcm_opcode_write12 = {
.update_usage_bits = set_dpofua_usage_bits,
};
-static struct target_opcode_descriptor tcm_opcode_write16 = {
+static const struct target_opcode_descriptor tcm_opcode_write16 = {
.support = SCSI_SUPPORT_FULL,
.opcode = WRITE_16,
.cdb_size = 16,
@@ -1413,7 +1413,7 @@ static struct target_opcode_descriptor tcm_opcode_write16 = {
.update_usage_bits = set_dpofua_usage_bits,
};
-static struct target_opcode_descriptor tcm_opcode_write_verify16 = {
+static const struct target_opcode_descriptor tcm_opcode_write_verify16 = {
.support = SCSI_SUPPORT_FULL,
.opcode = WRITE_VERIFY_16,
.cdb_size = 16,
@@ -1424,7 +1424,7 @@ static struct target_opcode_descriptor tcm_opcode_write_verify16 = {
.update_usage_bits = set_dpofua_usage_bits,
};
-static bool tcm_is_ws_enabled(struct target_opcode_descriptor *descr,
+static bool tcm_is_ws_enabled(const struct target_opcode_descriptor *descr,
struct se_cmd *cmd)
{
struct exec_cmd_ops *ops = cmd->protocol_data;
@@ -1434,7 +1434,7 @@ static bool tcm_is_ws_enabled(struct target_opcode_descriptor *descr,
!!ops->execute_write_same;
}
-static struct target_opcode_descriptor tcm_opcode_write_same32 = {
+static const struct target_opcode_descriptor tcm_opcode_write_same32 = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = VARIABLE_LENGTH_CMD,
@@ -1452,7 +1452,7 @@ static struct target_opcode_descriptor tcm_opcode_write_same32 = {
.update_usage_bits = set_dpofua_usage_bits32,
};
-static bool tcm_is_caw_enabled(struct target_opcode_descriptor *descr,
+static bool tcm_is_caw_enabled(const struct target_opcode_descriptor *descr,
struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
@@ -1460,7 +1460,7 @@ static bool tcm_is_caw_enabled(struct target_opcode_descriptor *descr,
return dev->dev_attrib.emulate_caw;
}
-static struct target_opcode_descriptor tcm_opcode_compare_write = {
+static const struct target_opcode_descriptor tcm_opcode_compare_write = {
.support = SCSI_SUPPORT_FULL,
.opcode = COMPARE_AND_WRITE,
.cdb_size = 16,
@@ -1472,7 +1472,7 @@ static struct target_opcode_descriptor tcm_opcode_compare_write = {
.update_usage_bits = set_dpofua_usage_bits,
};
-static struct target_opcode_descriptor tcm_opcode_read_capacity = {
+static const struct target_opcode_descriptor tcm_opcode_read_capacity = {
.support = SCSI_SUPPORT_FULL,
.opcode = READ_CAPACITY,
.cdb_size = 10,
@@ -1481,7 +1481,7 @@ static struct target_opcode_descriptor tcm_opcode_read_capacity = {
0x01, SCSI_CONTROL_MASK},
};
-static struct target_opcode_descriptor tcm_opcode_read_capacity16 = {
+static const struct target_opcode_descriptor tcm_opcode_read_capacity16 = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = SERVICE_ACTION_IN_16,
@@ -1493,7 +1493,7 @@ static struct target_opcode_descriptor tcm_opcode_read_capacity16 = {
0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
};
-static bool tcm_is_rep_ref_enabled(struct target_opcode_descriptor *descr,
+static bool tcm_is_rep_ref_enabled(const struct target_opcode_descriptor *descr,
struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
@@ -1507,7 +1507,7 @@ static bool tcm_is_rep_ref_enabled(struct target_opcode_descriptor *descr,
return true;
}
-static struct target_opcode_descriptor tcm_opcode_read_report_refferals = {
+static const struct target_opcode_descriptor tcm_opcode_read_report_refferals = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = SERVICE_ACTION_IN_16,
@@ -1520,7 +1520,7 @@ static struct target_opcode_descriptor tcm_opcode_read_report_refferals = {
.enabled = tcm_is_rep_ref_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_sync_cache = {
+static const struct target_opcode_descriptor tcm_opcode_sync_cache = {
.support = SCSI_SUPPORT_FULL,
.opcode = SYNCHRONIZE_CACHE,
.cdb_size = 10,
@@ -1529,7 +1529,7 @@ static struct target_opcode_descriptor tcm_opcode_sync_cache = {
0xff, SCSI_CONTROL_MASK},
};
-static struct target_opcode_descriptor tcm_opcode_sync_cache16 = {
+static const struct target_opcode_descriptor tcm_opcode_sync_cache16 = {
.support = SCSI_SUPPORT_FULL,
.opcode = SYNCHRONIZE_CACHE_16,
.cdb_size = 16,
@@ -1539,7 +1539,7 @@ static struct target_opcode_descriptor tcm_opcode_sync_cache16 = {
0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
};
-static bool tcm_is_unmap_enabled(struct target_opcode_descriptor *descr,
+static bool tcm_is_unmap_enabled(const struct target_opcode_descriptor *descr,
struct se_cmd *cmd)
{
struct exec_cmd_ops *ops = cmd->protocol_data;
@@ -1548,7 +1548,7 @@ static bool tcm_is_unmap_enabled(struct target_opcode_descriptor *descr,
return ops->execute_unmap && dev->dev_attrib.emulate_tpu;
}
-static struct target_opcode_descriptor tcm_opcode_unmap = {
+static const struct target_opcode_descriptor tcm_opcode_unmap = {
.support = SCSI_SUPPORT_FULL,
.opcode = UNMAP,
.cdb_size = 10,
@@ -1558,7 +1558,7 @@ static struct target_opcode_descriptor tcm_opcode_unmap = {
.enabled = tcm_is_unmap_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_write_same = {
+static const struct target_opcode_descriptor tcm_opcode_write_same = {
.support = SCSI_SUPPORT_FULL,
.opcode = WRITE_SAME,
.cdb_size = 10,
@@ -1568,7 +1568,7 @@ static struct target_opcode_descriptor tcm_opcode_write_same = {
.enabled = tcm_is_ws_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_write_same16 = {
+static const struct target_opcode_descriptor tcm_opcode_write_same16 = {
.support = SCSI_SUPPORT_FULL,
.opcode = WRITE_SAME_16,
.cdb_size = 16,
@@ -1579,7 +1579,7 @@ static struct target_opcode_descriptor tcm_opcode_write_same16 = {
.enabled = tcm_is_ws_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_verify = {
+static const struct target_opcode_descriptor tcm_opcode_verify = {
.support = SCSI_SUPPORT_FULL,
.opcode = VERIFY,
.cdb_size = 10,
@@ -1588,7 +1588,7 @@ static struct target_opcode_descriptor tcm_opcode_verify = {
0xff, SCSI_CONTROL_MASK},
};
-static struct target_opcode_descriptor tcm_opcode_verify16 = {
+static const struct target_opcode_descriptor tcm_opcode_verify16 = {
.support = SCSI_SUPPORT_FULL,
.opcode = VERIFY_16,
.cdb_size = 16,
@@ -1598,7 +1598,7 @@ static struct target_opcode_descriptor tcm_opcode_verify16 = {
0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
};
-static struct target_opcode_descriptor tcm_opcode_start_stop = {
+static const struct target_opcode_descriptor tcm_opcode_start_stop = {
.support = SCSI_SUPPORT_FULL,
.opcode = START_STOP,
.cdb_size = 6,
@@ -1606,7 +1606,7 @@ static struct target_opcode_descriptor tcm_opcode_start_stop = {
0x01, SCSI_CONTROL_MASK},
};
-static struct target_opcode_descriptor tcm_opcode_mode_select = {
+static const struct target_opcode_descriptor tcm_opcode_mode_select = {
.support = SCSI_SUPPORT_FULL,
.opcode = MODE_SELECT,
.cdb_size = 6,
@@ -1614,7 +1614,7 @@ static struct target_opcode_descriptor tcm_opcode_mode_select = {
0xff, SCSI_CONTROL_MASK},
};
-static struct target_opcode_descriptor tcm_opcode_mode_select10 = {
+static const struct target_opcode_descriptor tcm_opcode_mode_select10 = {
.support = SCSI_SUPPORT_FULL,
.opcode = MODE_SELECT_10,
.cdb_size = 10,
@@ -1623,7 +1623,7 @@ static struct target_opcode_descriptor tcm_opcode_mode_select10 = {
0xff, SCSI_CONTROL_MASK},
};
-static struct target_opcode_descriptor tcm_opcode_mode_sense = {
+static const struct target_opcode_descriptor tcm_opcode_mode_sense = {
.support = SCSI_SUPPORT_FULL,
.opcode = MODE_SENSE,
.cdb_size = 6,
@@ -1631,7 +1631,7 @@ static struct target_opcode_descriptor tcm_opcode_mode_sense = {
0xff, SCSI_CONTROL_MASK},
};
-static struct target_opcode_descriptor tcm_opcode_mode_sense10 = {
+static const struct target_opcode_descriptor tcm_opcode_mode_sense10 = {
.support = SCSI_SUPPORT_FULL,
.opcode = MODE_SENSE_10,
.cdb_size = 10,
@@ -1640,7 +1640,7 @@ static struct target_opcode_descriptor tcm_opcode_mode_sense10 = {
0xff, SCSI_CONTROL_MASK},
};
-static struct target_opcode_descriptor tcm_opcode_pri_read_keys = {
+static const struct target_opcode_descriptor tcm_opcode_pri_read_keys = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = PERSISTENT_RESERVE_IN,
@@ -1651,7 +1651,7 @@ static struct target_opcode_descriptor tcm_opcode_pri_read_keys = {
0xff, SCSI_CONTROL_MASK},
};
-static struct target_opcode_descriptor tcm_opcode_pri_read_resrv = {
+static const struct target_opcode_descriptor tcm_opcode_pri_read_resrv = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = PERSISTENT_RESERVE_IN,
@@ -1662,7 +1662,7 @@ static struct target_opcode_descriptor tcm_opcode_pri_read_resrv = {
0xff, SCSI_CONTROL_MASK},
};
-static bool tcm_is_pr_enabled(struct target_opcode_descriptor *descr,
+static bool tcm_is_pr_enabled(const struct target_opcode_descriptor *descr,
struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
@@ -1704,7 +1704,7 @@ static bool tcm_is_pr_enabled(struct target_opcode_descriptor *descr,
return true;
}
-static struct target_opcode_descriptor tcm_opcode_pri_read_caps = {
+static const struct target_opcode_descriptor tcm_opcode_pri_read_caps = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = PERSISTENT_RESERVE_IN,
@@ -1716,7 +1716,7 @@ static struct target_opcode_descriptor tcm_opcode_pri_read_caps = {
.enabled = tcm_is_pr_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_pri_read_full_status = {
+static const struct target_opcode_descriptor tcm_opcode_pri_read_full_status = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = PERSISTENT_RESERVE_IN,
@@ -1728,7 +1728,7 @@ static struct target_opcode_descriptor tcm_opcode_pri_read_full_status = {
.enabled = tcm_is_pr_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_pro_register = {
+static const struct target_opcode_descriptor tcm_opcode_pro_register = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = PERSISTENT_RESERVE_OUT,
@@ -1740,7 +1740,7 @@ static struct target_opcode_descriptor tcm_opcode_pro_register = {
.enabled = tcm_is_pr_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_pro_reserve = {
+static const struct target_opcode_descriptor tcm_opcode_pro_reserve = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = PERSISTENT_RESERVE_OUT,
@@ -1752,7 +1752,7 @@ static struct target_opcode_descriptor tcm_opcode_pro_reserve = {
.enabled = tcm_is_pr_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_pro_release = {
+static const struct target_opcode_descriptor tcm_opcode_pro_release = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = PERSISTENT_RESERVE_OUT,
@@ -1764,7 +1764,7 @@ static struct target_opcode_descriptor tcm_opcode_pro_release = {
.enabled = tcm_is_pr_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_pro_clear = {
+static const struct target_opcode_descriptor tcm_opcode_pro_clear = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = PERSISTENT_RESERVE_OUT,
@@ -1776,7 +1776,7 @@ static struct target_opcode_descriptor tcm_opcode_pro_clear = {
.enabled = tcm_is_pr_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_pro_preempt = {
+static const struct target_opcode_descriptor tcm_opcode_pro_preempt = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = PERSISTENT_RESERVE_OUT,
@@ -1788,7 +1788,7 @@ static struct target_opcode_descriptor tcm_opcode_pro_preempt = {
.enabled = tcm_is_pr_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_pro_preempt_abort = {
+static const struct target_opcode_descriptor tcm_opcode_pro_preempt_abort = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = PERSISTENT_RESERVE_OUT,
@@ -1800,7 +1800,7 @@ static struct target_opcode_descriptor tcm_opcode_pro_preempt_abort = {
.enabled = tcm_is_pr_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_pro_reg_ign_exist = {
+static const struct target_opcode_descriptor tcm_opcode_pro_reg_ign_exist = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = PERSISTENT_RESERVE_OUT,
@@ -1814,7 +1814,7 @@ static struct target_opcode_descriptor tcm_opcode_pro_reg_ign_exist = {
.enabled = tcm_is_pr_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_pro_register_move = {
+static const struct target_opcode_descriptor tcm_opcode_pro_register_move = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = PERSISTENT_RESERVE_OUT,
@@ -1826,7 +1826,7 @@ static struct target_opcode_descriptor tcm_opcode_pro_register_move = {
.enabled = tcm_is_pr_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_release = {
+static const struct target_opcode_descriptor tcm_opcode_release = {
.support = SCSI_SUPPORT_FULL,
.opcode = RELEASE_6,
.cdb_size = 6,
@@ -1835,7 +1835,7 @@ static struct target_opcode_descriptor tcm_opcode_release = {
.enabled = tcm_is_pr_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_release10 = {
+static const struct target_opcode_descriptor tcm_opcode_release10 = {
.support = SCSI_SUPPORT_FULL,
.opcode = RELEASE_10,
.cdb_size = 10,
@@ -1845,7 +1845,7 @@ static struct target_opcode_descriptor tcm_opcode_release10 = {
.enabled = tcm_is_pr_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_reserve = {
+static const struct target_opcode_descriptor tcm_opcode_reserve = {
.support = SCSI_SUPPORT_FULL,
.opcode = RESERVE_6,
.cdb_size = 6,
@@ -1854,7 +1854,7 @@ static struct target_opcode_descriptor tcm_opcode_reserve = {
.enabled = tcm_is_pr_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_reserve10 = {
+static const struct target_opcode_descriptor tcm_opcode_reserve10 = {
.support = SCSI_SUPPORT_FULL,
.opcode = RESERVE_10,
.cdb_size = 10,
@@ -1864,7 +1864,7 @@ static struct target_opcode_descriptor tcm_opcode_reserve10 = {
.enabled = tcm_is_pr_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_request_sense = {
+static const struct target_opcode_descriptor tcm_opcode_request_sense = {
.support = SCSI_SUPPORT_FULL,
.opcode = REQUEST_SENSE,
.cdb_size = 6,
@@ -1872,7 +1872,7 @@ static struct target_opcode_descriptor tcm_opcode_request_sense = {
0xff, SCSI_CONTROL_MASK},
};
-static struct target_opcode_descriptor tcm_opcode_inquiry = {
+static const struct target_opcode_descriptor tcm_opcode_inquiry = {
.support = SCSI_SUPPORT_FULL,
.opcode = INQUIRY,
.cdb_size = 6,
@@ -1880,7 +1880,7 @@ static struct target_opcode_descriptor tcm_opcode_inquiry = {
0xff, SCSI_CONTROL_MASK},
};
-static bool tcm_is_3pc_enabled(struct target_opcode_descriptor *descr,
+static bool tcm_is_3pc_enabled(const struct target_opcode_descriptor *descr,
struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
@@ -1888,7 +1888,7 @@ static bool tcm_is_3pc_enabled(struct target_opcode_descriptor *descr,
return dev->dev_attrib.emulate_3pc;
}
-static struct target_opcode_descriptor tcm_opcode_extended_copy_lid1 = {
+static const struct target_opcode_descriptor tcm_opcode_extended_copy_lid1 = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = EXTENDED_COPY,
@@ -1900,7 +1900,7 @@ static struct target_opcode_descriptor tcm_opcode_extended_copy_lid1 = {
.enabled = tcm_is_3pc_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_rcv_copy_res_op_params = {
+static const struct target_opcode_descriptor tcm_opcode_rcv_copy_res_op_params = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = RECEIVE_COPY_RESULTS,
@@ -1914,7 +1914,7 @@ static struct target_opcode_descriptor tcm_opcode_rcv_copy_res_op_params = {
.enabled = tcm_is_3pc_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_report_luns = {
+static const struct target_opcode_descriptor tcm_opcode_report_luns = {
.support = SCSI_SUPPORT_FULL,
.opcode = REPORT_LUNS,
.cdb_size = 12,
@@ -1923,7 +1923,7 @@ static struct target_opcode_descriptor tcm_opcode_report_luns = {
0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
};
-static struct target_opcode_descriptor tcm_opcode_test_unit_ready = {
+static const struct target_opcode_descriptor tcm_opcode_test_unit_ready = {
.support = SCSI_SUPPORT_FULL,
.opcode = TEST_UNIT_READY,
.cdb_size = 6,
@@ -1931,7 +1931,7 @@ static struct target_opcode_descriptor tcm_opcode_test_unit_ready = {
0x00, SCSI_CONTROL_MASK},
};
-static struct target_opcode_descriptor tcm_opcode_report_target_pgs = {
+static const struct target_opcode_descriptor tcm_opcode_report_target_pgs = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = MAINTENANCE_IN,
@@ -1942,7 +1942,7 @@ static struct target_opcode_descriptor tcm_opcode_report_target_pgs = {
0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
};
-static bool spc_rsoc_enabled(struct target_opcode_descriptor *descr,
+static bool spc_rsoc_enabled(const struct target_opcode_descriptor *descr,
struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
@@ -1950,7 +1950,7 @@ static bool spc_rsoc_enabled(struct target_opcode_descriptor *descr,
return dev->dev_attrib.emulate_rsoc;
}
-static struct target_opcode_descriptor tcm_opcode_report_supp_opcodes = {
+static const struct target_opcode_descriptor tcm_opcode_report_supp_opcodes = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = MAINTENANCE_IN,
@@ -1963,7 +1963,7 @@ static struct target_opcode_descriptor tcm_opcode_report_supp_opcodes = {
.enabled = spc_rsoc_enabled,
};
-static bool tcm_is_set_tpg_enabled(struct target_opcode_descriptor *descr,
+static bool tcm_is_set_tpg_enabled(const struct target_opcode_descriptor *descr,
struct se_cmd *cmd)
{
struct t10_alua_tg_pt_gp *l_tg_pt_gp;
@@ -1984,7 +1984,7 @@ static bool tcm_is_set_tpg_enabled(struct target_opcode_descriptor *descr,
return true;
}
-static struct target_opcode_descriptor tcm_opcode_set_tpg = {
+static const struct target_opcode_descriptor tcm_opcode_set_tpg = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = MAINTENANCE_OUT,
@@ -1996,7 +1996,7 @@ static struct target_opcode_descriptor tcm_opcode_set_tpg = {
.enabled = tcm_is_set_tpg_enabled,
};
-static struct target_opcode_descriptor *tcm_supported_opcodes[] = {
+static const struct target_opcode_descriptor *tcm_supported_opcodes[] = {
&tcm_opcode_read6,
&tcm_opcode_read10,
&tcm_opcode_read12,
@@ -2053,7 +2053,7 @@ static struct target_opcode_descriptor *tcm_supported_opcodes[] = {
static int
spc_rsoc_encode_command_timeouts_descriptor(unsigned char *buf, u8 ctdp,
- struct target_opcode_descriptor *descr)
+ const struct target_opcode_descriptor *descr)
{
if (!ctdp)
return 0;
@@ -2068,7 +2068,7 @@ spc_rsoc_encode_command_timeouts_descriptor(unsigned char *buf, u8 ctdp,
static int
spc_rsoc_encode_command_descriptor(unsigned char *buf, u8 ctdp,
- struct target_opcode_descriptor *descr)
+ const struct target_opcode_descriptor *descr)
{
int td_size = 0;
@@ -2087,7 +2087,7 @@ spc_rsoc_encode_command_descriptor(unsigned char *buf, u8 ctdp,
static int
spc_rsoc_encode_one_command_descriptor(unsigned char *buf, u8 ctdp,
- struct target_opcode_descriptor *descr,
+ const struct target_opcode_descriptor *descr,
struct se_device *dev)
{
int td_size = 0;
@@ -2110,9 +2110,9 @@ spc_rsoc_encode_one_command_descriptor(unsigned char *buf, u8 ctdp,
}
static sense_reason_t
-spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode)
+spc_rsoc_get_descr(struct se_cmd *cmd, const struct target_opcode_descriptor **opcode)
{
- struct target_opcode_descriptor *descr;
+ const struct target_opcode_descriptor *descr;
struct se_session *sess = cmd->se_sess;
unsigned char *cdb = cmd->t_task_cdb;
u8 opts = cdb[2] & 0x3;
@@ -2199,7 +2199,7 @@ static sense_reason_t
spc_emulate_report_supp_op_codes(struct se_cmd *cmd)
{
int descr_num = ARRAY_SIZE(tcm_supported_opcodes);
- struct target_opcode_descriptor *descr = NULL;
+ const struct target_opcode_descriptor *descr = NULL;
unsigned char *cdb = cmd->t_task_cdb;
u8 rctd = (cdb[2] >> 7) & 0x1;
unsigned char *buf = NULL;
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index 210648a0092e..6bdf2d8bd694 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -280,30 +280,51 @@ static ssize_t target_stat_lu_num_cmds_show(struct config_item *item,
char *page)
{
struct se_device *dev = to_stat_lu_dev(item);
+ struct se_dev_io_stats *stats;
+ unsigned int cpu;
+ u32 cmds = 0;
+
+ for_each_possible_cpu(cpu) {
+ stats = per_cpu_ptr(dev->stats, cpu);
+ cmds += stats->total_cmds;
+ }
/* scsiLuNumCommands */
- return snprintf(page, PAGE_SIZE, "%lu\n",
- atomic_long_read(&dev->num_cmds));
+ return snprintf(page, PAGE_SIZE, "%u\n", cmds);
}
static ssize_t target_stat_lu_read_mbytes_show(struct config_item *item,
char *page)
{
struct se_device *dev = to_stat_lu_dev(item);
+ struct se_dev_io_stats *stats;
+ unsigned int cpu;
+ u32 bytes = 0;
+
+ for_each_possible_cpu(cpu) {
+ stats = per_cpu_ptr(dev->stats, cpu);
+ bytes += stats->read_bytes;
+ }
/* scsiLuReadMegaBytes */
- return snprintf(page, PAGE_SIZE, "%lu\n",
- atomic_long_read(&dev->read_bytes) >> 20);
+ return snprintf(page, PAGE_SIZE, "%u\n", bytes >> 20);
}
static ssize_t target_stat_lu_write_mbytes_show(struct config_item *item,
char *page)
{
struct se_device *dev = to_stat_lu_dev(item);
+ struct se_dev_io_stats *stats;
+ unsigned int cpu;
+ u32 bytes = 0;
+
+ for_each_possible_cpu(cpu) {
+ stats = per_cpu_ptr(dev->stats, cpu);
+ bytes += stats->write_bytes;
+ }
/* scsiLuWrittenMegaBytes */
- return snprintf(page, PAGE_SIZE, "%lu\n",
- atomic_long_read(&dev->write_bytes) >> 20);
+ return snprintf(page, PAGE_SIZE, "%u\n", bytes >> 20);
}
static ssize_t target_stat_lu_resets_show(struct config_item *item, char *page)
@@ -1019,8 +1040,11 @@ static ssize_t target_stat_auth_num_cmds_show(struct config_item *item,
{
struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
+ struct se_dev_entry_io_stats *stats;
struct se_dev_entry *deve;
+ unsigned int cpu;
ssize_t ret;
+ u32 cmds = 0;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
@@ -1028,9 +1052,14 @@ static ssize_t target_stat_auth_num_cmds_show(struct config_item *item,
rcu_read_unlock();
return -ENODEV;
}
+
+ for_each_possible_cpu(cpu) {
+ stats = per_cpu_ptr(deve->stats, cpu);
+ cmds += stats->total_cmds;
+ }
+
/* scsiAuthIntrOutCommands */
- ret = snprintf(page, PAGE_SIZE, "%lu\n",
- atomic_long_read(&deve->total_cmds));
+ ret = snprintf(page, PAGE_SIZE, "%u\n", cmds);
rcu_read_unlock();
return ret;
}
@@ -1040,8 +1069,11 @@ static ssize_t target_stat_auth_read_mbytes_show(struct config_item *item,
{
struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
+ struct se_dev_entry_io_stats *stats;
struct se_dev_entry *deve;
+ unsigned int cpu;
ssize_t ret;
+ u32 bytes = 0;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
@@ -1049,9 +1081,14 @@ static ssize_t target_stat_auth_read_mbytes_show(struct config_item *item,
rcu_read_unlock();
return -ENODEV;
}
+
+ for_each_possible_cpu(cpu) {
+ stats = per_cpu_ptr(deve->stats, cpu);
+ bytes += stats->read_bytes;
+ }
+
/* scsiAuthIntrReadMegaBytes */
- ret = snprintf(page, PAGE_SIZE, "%u\n",
- (u32)(atomic_long_read(&deve->read_bytes) >> 20));
+ ret = snprintf(page, PAGE_SIZE, "%u\n", bytes >> 20);
rcu_read_unlock();
return ret;
}
@@ -1061,8 +1098,11 @@ static ssize_t target_stat_auth_write_mbytes_show(struct config_item *item,
{
struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
+ struct se_dev_entry_io_stats *stats;
struct se_dev_entry *deve;
+ unsigned int cpu;
ssize_t ret;
+ u32 bytes = 0;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
@@ -1070,9 +1110,14 @@ static ssize_t target_stat_auth_write_mbytes_show(struct config_item *item,
rcu_read_unlock();
return -ENODEV;
}
+
+ for_each_possible_cpu(cpu) {
+ stats = per_cpu_ptr(deve->stats, cpu);
+ bytes += stats->write_bytes;
+ }
+
/* scsiAuthIntrWrittenMegaBytes */
- ret = snprintf(page, PAGE_SIZE, "%u\n",
- (u32)(atomic_long_read(&deve->write_bytes) >> 20));
+ ret = snprintf(page, PAGE_SIZE, "%u\n", bytes >> 20);
rcu_read_unlock();
return ret;
}
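The conversions above all follow the standard per-CPU counter pattern: the fast path increments the local CPU's slot with this_cpu_inc()/this_cpu_add(), and readers sum every slot with for_each_possible_cpu() and per_cpu_ptr(). A minimal illustrative sketch (the struct mirrors the fields used above; the block itself is not part of the diff):

/* Illustrative sketch of the per-CPU stats pattern; not part of the patch. */
struct se_dev_io_stats {
	u32 total_cmds;
	u32 read_bytes;
	u32 write_bytes;
};

/* Fast path, per command: this_cpu_inc(dev->stats->total_cmds); */

static u32 sum_total_cmds(struct se_dev_io_stats __percpu *stats)
{
	unsigned int cpu;
	u32 cmds = 0;

	for_each_possible_cpu(cpu)
		cmds += per_cpu_ptr(stats, cpu)->total_cmds;
	return cmds;
}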
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 05d29201b730..0a76bdfe5528 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -2213,6 +2213,7 @@ static int target_write_prot_action(struct se_cmd *cmd)
static bool target_handle_task_attr(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
+ unsigned long flags;
if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
return false;
@@ -2225,13 +2226,10 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
*/
switch (cmd->sam_task_attr) {
case TCM_HEAD_TAG:
- atomic_inc_mb(&dev->non_ordered);
pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
cmd->t_task_cdb[0]);
return false;
case TCM_ORDERED_TAG:
- atomic_inc_mb(&dev->delayed_cmd_count);
-
pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
cmd->t_task_cdb[0]);
break;
@@ -2239,29 +2237,29 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
/*
* For SIMPLE and UNTAGGED Task Attribute commands
*/
- atomic_inc_mb(&dev->non_ordered);
-
- if (atomic_read(&dev->delayed_cmd_count) == 0)
+retry:
+ if (percpu_ref_tryget_live(&dev->non_ordered))
return false;
+
break;
}
- if (cmd->sam_task_attr != TCM_ORDERED_TAG) {
- atomic_inc_mb(&dev->delayed_cmd_count);
- /*
- * We will account for this when we dequeue from the delayed
- * list.
- */
- atomic_dec_mb(&dev->non_ordered);
+ spin_lock_irqsave(&dev->delayed_cmd_lock, flags);
+ if (cmd->sam_task_attr == TCM_SIMPLE_TAG &&
+ !percpu_ref_is_dying(&dev->non_ordered)) {
+ spin_unlock_irqrestore(&dev->delayed_cmd_lock, flags);
+ /* We raced with the last ordered completion so retry. */
+ goto retry;
+ } else if (!percpu_ref_is_dying(&dev->non_ordered)) {
+ percpu_ref_kill(&dev->non_ordered);
}
- spin_lock_irq(&cmd->t_state_lock);
+ spin_lock(&cmd->t_state_lock);
cmd->transport_state &= ~CMD_T_SENT;
- spin_unlock_irq(&cmd->t_state_lock);
+ spin_unlock(&cmd->t_state_lock);
- spin_lock(&dev->delayed_cmd_lock);
list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
- spin_unlock(&dev->delayed_cmd_lock);
+ spin_unlock_irqrestore(&dev->delayed_cmd_lock, flags);
pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD listn",
cmd->t_task_cdb[0], cmd->sam_task_attr);
@@ -2313,41 +2311,52 @@ void target_do_delayed_work(struct work_struct *work)
while (!dev->ordered_sync_in_progress) {
struct se_cmd *cmd;
- if (list_empty(&dev->delayed_cmd_list))
+ /*
+ * We can be woken up early/late due to races or the
+ * extra wake up we do when adding commands to the list.
+ * We check for both cases here.
+ */
+ if (list_empty(&dev->delayed_cmd_list) ||
+ !percpu_ref_is_zero(&dev->non_ordered))
break;
cmd = list_entry(dev->delayed_cmd_list.next,
struct se_cmd, se_delayed_node);
+ cmd->se_cmd_flags |= SCF_TASK_ORDERED_SYNC;
+ cmd->transport_state |= CMD_T_SENT;
- if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
- /*
- * Check if we started with:
- * [ordered] [simple] [ordered]
- * and we are now at the last ordered so we have to wait
- * for the simple cmd.
- */
- if (atomic_read(&dev->non_ordered) > 0)
- break;
-
- dev->ordered_sync_in_progress = true;
- }
+ dev->ordered_sync_in_progress = true;
list_del(&cmd->se_delayed_node);
- atomic_dec_mb(&dev->delayed_cmd_count);
spin_unlock(&dev->delayed_cmd_lock);
- if (cmd->sam_task_attr != TCM_ORDERED_TAG)
- atomic_inc_mb(&dev->non_ordered);
-
- cmd->transport_state |= CMD_T_SENT;
-
__target_execute_cmd(cmd, true);
-
spin_lock(&dev->delayed_cmd_lock);
}
spin_unlock(&dev->delayed_cmd_lock);
}
+static void transport_complete_ordered_sync(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->delayed_cmd_lock, flags);
+ dev->dev_cur_ordered_id++;
+
+ pr_debug("Incremented dev_cur_ordered_id: %u for type %d\n",
+ dev->dev_cur_ordered_id, cmd->sam_task_attr);
+
+ dev->ordered_sync_in_progress = false;
+
+ if (list_empty(&dev->delayed_cmd_list))
+ percpu_ref_resurrect(&dev->non_ordered);
+ else
+ schedule_work(&dev->delayed_cmd_work);
+
+ spin_unlock_irqrestore(&dev->delayed_cmd_lock, flags);
+}
+
/*
* Called from I/O completion to determine which dormant/delayed
* and ordered cmds need to have their tasks added to the execution queue.
@@ -2360,30 +2369,24 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
return;
if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET))
- goto restart;
-
- if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
- atomic_dec_mb(&dev->non_ordered);
- dev->dev_cur_ordered_id++;
- } else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
- atomic_dec_mb(&dev->non_ordered);
- dev->dev_cur_ordered_id++;
- pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
- dev->dev_cur_ordered_id);
- } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
- spin_lock(&dev->delayed_cmd_lock);
- dev->ordered_sync_in_progress = false;
- spin_unlock(&dev->delayed_cmd_lock);
+ return;
- dev->dev_cur_ordered_id++;
- pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
- dev->dev_cur_ordered_id);
- }
cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;
-restart:
- if (atomic_read(&dev->delayed_cmd_count) > 0)
- schedule_work(&dev->delayed_cmd_work);
+ if (cmd->se_cmd_flags & SCF_TASK_ORDERED_SYNC) {
+ transport_complete_ordered_sync(cmd);
+ return;
+ }
+
+ switch (cmd->sam_task_attr) {
+ case TCM_SIMPLE_TAG:
+ percpu_ref_put(&dev->non_ordered);
+ break;
+ case TCM_ORDERED_TAG:
+ /* All ordered should have been executed as sync */
+ WARN_ON(1);
+ break;
+ }
}
static void transport_complete_qf(struct se_cmd *cmd)
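The task-attribute rework above replaces the atomic counters with a percpu_ref used as a gate: SIMPLE commands take a live reference, an ORDERED command kills the gate and lets the release callback fire once in-flight SIMPLE commands drain, and the gate is resurrected when the ordered sync completes. A condensed illustrative sketch of that lifecycle (generic names; only the percpu_ref calls are real kernel API):

/* Illustrative sketch of the percpu_ref gating pattern; not part of the patch. */
static void gate_release(struct percpu_ref *gate)
{
	/* Last in-flight SIMPLE command finished; run the delayed ORDERED work. */
}

static bool simple_cmd_start(struct percpu_ref *gate)
{
	/* Fails once an ORDERED command has killed the gate. */
	return percpu_ref_tryget_live(gate);
}

static void ordered_cmd_queue(struct percpu_ref *gate)
{
	if (!percpu_ref_is_dying(gate))
		percpu_ref_kill(gate);	/* stop new SIMPLE cmds, drain old ones */
}

static void ordered_cmd_done(struct percpu_ref *gate)
{
	percpu_ref_resurrect(gate);	/* requires PERCPU_REF_ALLOW_REINIT */
}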
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index f1294c29f484..1e50675772fe 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -674,7 +674,6 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
int tag = scsi_cmd_to_rq(cmd)->tag;
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
struct ufs_hw_queue *hwq;
- unsigned long flags;
int err;
/* Skip task abort in case previous aborts failed and report failure */
@@ -713,10 +712,5 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
return FAILED;
}
- spin_lock_irqsave(&hwq->cq_lock, flags);
- if (ufshcd_cmd_inflight(lrbp->cmd))
- ufshcd_release_scsi_cmd(hba, lrbp);
- spin_unlock_irqrestore(&hwq->cq_lock, flags);
-
return SUCCESS;
}
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index 634cf163f4cb..de8b6acd4058 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -57,6 +57,36 @@ static const char *ufs_hs_gear_to_string(enum ufs_hs_gear_tag gear)
}
}
+static const char *ufs_wb_resize_hint_to_string(enum wb_resize_hint hint)
+{
+ switch (hint) {
+ case WB_RESIZE_HINT_KEEP:
+ return "keep";
+ case WB_RESIZE_HINT_DECREASE:
+ return "decrease";
+ case WB_RESIZE_HINT_INCREASE:
+ return "increase";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *ufs_wb_resize_status_to_string(enum wb_resize_status status)
+{
+ switch (status) {
+ case WB_RESIZE_STATUS_IDLE:
+ return "idle";
+ case WB_RESIZE_STATUS_IN_PROGRESS:
+ return "in_progress";
+ case WB_RESIZE_STATUS_COMPLETE_SUCCESS:
+ return "complete_success";
+ case WB_RESIZE_STATUS_GENERAL_FAILURE:
+ return "general_failure";
+ default:
+ return "unknown";
+ }
+}
+
static const char *ufshcd_uic_link_state_to_string(
enum uic_link_state state)
{
@@ -411,6 +441,44 @@ static ssize_t wb_flush_threshold_store(struct device *dev,
return count;
}
+static const char * const wb_resize_en_mode[] = {
+ [WB_RESIZE_EN_IDLE] = "idle",
+ [WB_RESIZE_EN_DECREASE] = "decrease",
+ [WB_RESIZE_EN_INCREASE] = "increase",
+};
+
+static ssize_t wb_resize_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int mode;
+ ssize_t res;
+
+ if (!ufshcd_is_wb_allowed(hba) || !hba->dev_info.wb_enabled
+ || !hba->dev_info.b_presrv_uspc_en
+ || !(hba->dev_info.ext_wb_sup & UFS_DEV_WB_BUF_RESIZE))
+ return -EOPNOTSUPP;
+
+ mode = sysfs_match_string(wb_resize_en_mode, buf);
+ if (mode < 0)
+ return -EINVAL;
+
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ res = -EBUSY;
+ goto out;
+ }
+
+ ufshcd_rpm_get_sync(hba);
+ res = ufshcd_wb_set_resize_en(hba, mode);
+ ufshcd_rpm_put_sync(hba);
+
+out:
+ up(&hba->host_sem);
+ return res < 0 ? res : count;
+}
+
/**
* pm_qos_enable_show - sysfs handler to show pm qos enable value
* @dev: device associated with the UFS controller
@@ -526,6 +594,7 @@ static DEVICE_ATTR_RW(auto_hibern8);
static DEVICE_ATTR_RW(wb_on);
static DEVICE_ATTR_RW(enable_wb_buf_flush);
static DEVICE_ATTR_RW(wb_flush_threshold);
+static DEVICE_ATTR_WO(wb_resize_enable);
static DEVICE_ATTR_RW(rtc_update_ms);
static DEVICE_ATTR_RW(pm_qos_enable);
static DEVICE_ATTR_RO(critical_health);
@@ -543,6 +612,7 @@ static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_wb_on.attr,
&dev_attr_enable_wb_buf_flush.attr,
&dev_attr_wb_flush_threshold.attr,
+ &dev_attr_wb_resize_enable.attr,
&dev_attr_rtc_update_ms.attr,
&dev_attr_pm_qos_enable.attr,
&dev_attr_critical_health.attr,
@@ -1549,6 +1619,67 @@ static inline bool ufshcd_is_wb_attrs(enum attr_idn idn)
idn <= QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE;
}
+static int wb_read_resize_attrs(struct ufs_hba *hba,
+ enum attr_idn idn, u32 *attr_val)
+{
+ u8 index = 0;
+ int ret;
+
+ if (!ufshcd_is_wb_allowed(hba) || !hba->dev_info.wb_enabled
+ || !hba->dev_info.b_presrv_uspc_en
+ || !(hba->dev_info.ext_wb_sup & UFS_DEV_WB_BUF_RESIZE))
+ return -EOPNOTSUPP;
+
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ up(&hba->host_sem);
+ return -EBUSY;
+ }
+
+ index = ufshcd_wb_get_query_index(hba);
+ ufshcd_rpm_get_sync(hba);
+ ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ idn, index, 0, attr_val);
+ ufshcd_rpm_put_sync(hba);
+
+ up(&hba->host_sem);
+ return ret;
+}
+
+static ssize_t wb_resize_hint_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int ret;
+ u32 value;
+
+ ret = wb_read_resize_attrs(hba,
+ QUERY_ATTR_IDN_WB_BUF_RESIZE_HINT, &value);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%s\n", ufs_wb_resize_hint_to_string(value));
+}
+
+static DEVICE_ATTR_RO(wb_resize_hint);
+
+static ssize_t wb_resize_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int ret;
+ u32 value;
+
+ ret = wb_read_resize_attrs(hba,
+ QUERY_ATTR_IDN_WB_BUF_RESIZE_STATUS, &value);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%s\n", ufs_wb_resize_status_to_string(value));
+}
+
+static DEVICE_ATTR_RO(wb_resize_status);
+
#define UFS_ATTRIBUTE(_name, _uname) \
static ssize_t _name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
@@ -1622,6 +1753,8 @@ static struct attribute *ufs_sysfs_attributes[] = {
&dev_attr_wb_avail_buf.attr,
&dev_attr_wb_life_time_est.attr,
&dev_attr_wb_cur_buf.attr,
+ &dev_attr_wb_resize_hint.attr,
+ &dev_attr_wb_resize_status.attr,
NULL,
};
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 7735421e3991..76cedd30c274 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -53,7 +53,7 @@
/* UIC command timeout, unit: ms */
enum {
UIC_CMD_TIMEOUT_DEFAULT = 500,
- UIC_CMD_TIMEOUT_MAX = 2000,
+ UIC_CMD_TIMEOUT_MAX = 5000,
};
/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES 10
@@ -63,7 +63,11 @@ enum {
/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
-#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
+enum {
+ QUERY_REQ_TIMEOUT_MIN = 1,
+ QUERY_REQ_TIMEOUT_DEFAULT = 1500,
+ QUERY_REQ_TIMEOUT_MAX = 30000
+};
/* Advanced RPMB request timeout */
#define ADVANCED_RPMB_REQ_TIMEOUT 3000 /* 3 seconds */
@@ -133,7 +137,24 @@ static const struct kernel_param_ops uic_cmd_timeout_ops = {
module_param_cb(uic_cmd_timeout, &uic_cmd_timeout_ops, &uic_cmd_timeout, 0644);
MODULE_PARM_DESC(uic_cmd_timeout,
- "UFS UIC command timeout in milliseconds. Defaults to 500ms. Supported values range from 500ms to 2 seconds inclusively");
+ "UFS UIC command timeout in milliseconds. Defaults to 500ms. Supported values range from 500ms to 5 seconds inclusively");
+
+static unsigned int dev_cmd_timeout = QUERY_REQ_TIMEOUT_DEFAULT;
+
+static int dev_cmd_timeout_set(const char *val, const struct kernel_param *kp)
+{
+ return param_set_uint_minmax(val, kp, QUERY_REQ_TIMEOUT_MIN,
+ QUERY_REQ_TIMEOUT_MAX);
+}
+
+static const struct kernel_param_ops dev_cmd_timeout_ops = {
+ .set = dev_cmd_timeout_set,
+ .get = param_get_uint,
+};
+
+module_param_cb(dev_cmd_timeout, &dev_cmd_timeout_ops, &dev_cmd_timeout, 0644);
+MODULE_PARM_DESC(dev_cmd_timeout,
+ "UFS Device command timeout in milliseconds. Defaults to 1.5s. Supported values range from 1ms to 30 seconds inclusively");
#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
({ \
@@ -432,7 +453,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
u8 opcode = 0, group_id = 0;
u32 doorbell = 0;
u32 intr;
- int hwq_id = -1;
+ u32 hwq_id = 0;
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
struct scsi_cmnd *cmd = lrbp->cmd;
struct request *rq = scsi_cmd_to_rq(cmd);
@@ -644,9 +665,6 @@ static void ufshcd_print_host_state(struct ufs_hba *hba)
"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
div_u64(hba->ufs_stats.last_hibern8_exit_tstamp, 1000),
hba->ufs_stats.hibern8_exit_cnt);
- dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
- div_u64(hba->ufs_stats.last_intr_ts, 1000),
- hba->ufs_stats.last_intr_status);
dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
hba->eh_flags, hba->req_abort_count);
dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
@@ -3365,7 +3383,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
struct ufs_query_req *request = NULL;
struct ufs_query_res *response = NULL;
int err, selector = 0;
- int timeout = QUERY_REQ_TIMEOUT;
+ int timeout = dev_cmd_timeout;
BUG_ON(!hba);
@@ -3462,7 +3480,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
goto out_unlock;
}
- err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout);
if (err) {
dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
@@ -3558,7 +3576,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
goto out_unlock;
}
- err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout);
if (err) {
dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
@@ -6020,7 +6038,7 @@ int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id)
request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
- err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout);
if (err) {
dev_err(hba->dev, "%s: failed to read device level exception %d\n",
@@ -6107,6 +6125,21 @@ int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable)
return ret;
}
+int ufshcd_wb_set_resize_en(struct ufs_hba *hba, enum wb_resize_en en_mode)
+{
+ int ret;
+ u8 index;
+
+ index = ufshcd_wb_get_query_index(hba);
+ ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_WB_BUF_RESIZE_EN, index, 0, &en_mode);
+ if (ret)
+ dev_err(hba->dev, "%s: Enable WB buf resize operation failed %d\n",
+ __func__, ret);
+
+ return ret;
+}
+
static bool ufshcd_wb_curr_buff_threshold_check(struct ufs_hba *hba,
u32 avail_buf)
{
@@ -6572,7 +6605,7 @@ static void ufshcd_err_handler(struct work_struct *work)
hba = container_of(work, struct ufs_hba, eh_work);
dev_info(hba->dev,
- "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n",
+ "%s started; HBA state %s; powered %d; shutting down %d; saved_err = 0x%x; saved_uic_err = 0x%x; force_reset = %d%s\n",
__func__, ufshcd_state_name[hba->ufshcd_state],
hba->is_powered, hba->shutting_down, hba->saved_err,
hba->saved_uic_err, hba->force_reset,
@@ -7001,7 +7034,7 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
}
/**
- * ufshcd_intr - Main interrupt service routine
+ * ufshcd_threaded_intr - Threaded interrupt service routine
* @irq: irq number
* @__hba: pointer to adapter instance
*
@@ -7009,16 +7042,14 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
-static irqreturn_t ufshcd_intr(int irq, void *__hba)
+static irqreturn_t ufshcd_threaded_intr(int irq, void *__hba)
{
- u32 intr_status, enabled_intr_status = 0;
+ u32 last_intr_status, intr_status, enabled_intr_status = 0;
irqreturn_t retval = IRQ_NONE;
struct ufs_hba *hba = __hba;
int retries = hba->nutrs;
- intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
- hba->ufs_stats.last_intr_status = intr_status;
- hba->ufs_stats.last_intr_ts = local_clock();
+ last_intr_status = intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
/*
* There could be max of hba->nutrs reqs in flight and in worst case
@@ -7042,7 +7073,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
__func__,
intr_status,
- hba->ufs_stats.last_intr_status,
+ last_intr_status,
enabled_intr_status);
ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
}
@@ -7050,6 +7081,29 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
return retval;
}
+/**
+ * ufshcd_intr - Main interrupt service routine
+ * @irq: irq number
+ * @__hba: pointer to adapter instance
+ *
+ * Return:
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_WAKE_THREAD - If handling is moved to the threaded handler
+ * IRQ_NONE - If invalid interrupt
+ */
+static irqreturn_t ufshcd_intr(int irq, void *__hba)
+{
+ struct ufs_hba *hba = __hba;
+
+ /* Defer to the threaded handler unless both MCQ and ESI are enabled */
+ if (!hba->mcq_enabled || !hba->mcq_esi_enabled)
+ return IRQ_WAKE_THREAD;
+
+ /* Directly handle interrupts since the MCQ ESI handlers do the hard work */
+ return ufshcd_sl_intr(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS) &
+ ufshcd_readl(hba, REG_INTERRUPT_ENABLE));
+}
+
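[Editor's note — illustrative sketch, not part of the patch.] For readers unfamiliar with the primary/threaded handler split this hunk introduces, the snippet below shows the general pattern in isolation. The foo_* names and struct are hypothetical; it simply mirrors how devm_request_threaded_irq() (used in the ufshcd_init() hunk further below) pairs a fast hard-IRQ handler that may return IRQ_WAKE_THREAD with a sleepable threaded handler.

#include <linux/interrupt.h>

/* Hypothetical driver state; only the handler split is the point here. */
struct foo_ctrl {
	bool fast_path_ok;
};

static irqreturn_t foo_hardirq(int irq, void *data)
{
	struct foo_ctrl *ctrl = data;

	/* Cheap work can be completed directly in hard-IRQ context. */
	if (ctrl->fast_path_ok)
		return IRQ_HANDLED;

	/* Anything heavier is deferred to the threaded handler. */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t foo_thread_fn(int irq, void *data)
{
	struct foo_ctrl *ctrl = data;

	/* Runs in process context, so longer-running handling is fine here. */
	(void)ctrl;
	return IRQ_HANDLED;
}

/*
 * Registration, mirroring the ufshcd_init() change below:
 *	err = devm_request_threaded_irq(dev, irq, foo_hardirq, foo_thread_fn,
 *					IRQF_ONESHOT | IRQF_SHARED, "foo", ctrl);
 */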
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
{
int err = 0;
@@ -7245,7 +7299,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
* bound to fail since dev_cmd.query and dev_cmd.type were left empty.
* read the response directly ignoring all errors.
*/
- ufshcd_issue_dev_cmd(hba, lrbp, tag, QUERY_REQ_TIMEOUT);
+ ufshcd_issue_dev_cmd(hba, lrbp, tag, dev_cmd_timeout);
/* just copy the upiu response as it is */
memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
@@ -8107,6 +8161,9 @@ static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf)
*/
dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
+ dev_info->ext_wb_sup = get_unaligned_be16(desc_buf +
+ DEVICE_DESC_PARAM_EXT_WB_SUP);
+
dev_info->b_presrv_uspc_en =
desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
@@ -8678,7 +8735,7 @@ static void ufshcd_set_timestamp_attr(struct ufs_hba *hba)
put_unaligned_be64(ktime_get_real_ns(), &upiu_data->osf3);
- err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout);
if (err)
dev_err(hba->dev, "%s: failed to set timestamp %d\n",
@@ -8793,6 +8850,7 @@ static void ufshcd_config_mcq(struct ufs_hba *hba)
u32 intrs;
ret = ufshcd_mcq_vops_config_esi(hba);
+ hba->mcq_esi_enabled = !ret;
dev_info(hba->dev, "ESI %sconfigured\n", ret ? "is not " : "");
intrs = UFSHCD_ENABLE_MCQ_INTRS;
@@ -10654,7 +10712,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
/* IRQ registration */
- err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
+ err = devm_request_threaded_irq(dev, irq, ufshcd_intr, ufshcd_threaded_intr,
+ IRQF_ONESHOT | IRQF_SHARED, UFSHCD, hba);
if (err) {
dev_err(hba->dev, "request irq failed\n");
goto out_disable;
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 77f8ddb1f207..37887ec68412 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -5,6 +5,7 @@
#include <linux/acpi.h>
#include <linux/clk.h>
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/devfreq.h>
#include <linux/gpio/consumer.h>
@@ -102,6 +103,24 @@ static const struct __ufs_qcom_bw_table {
[MODE_MAX][0][0] = { 7643136, 819200 },
};
+static const struct {
+ int nminor;
+ char *prefix;
+} testbus_info[TSTBUS_MAX] = {
+ [TSTBUS_UAWM] = {32, "TSTBUS_UAWM"},
+ [TSTBUS_UARM] = {32, "TSTBUS_UARM"},
+ [TSTBUS_TXUC] = {32, "TSTBUS_TXUC"},
+ [TSTBUS_RXUC] = {32, "TSTBUS_RXUC"},
+ [TSTBUS_DFC] = {32, "TSTBUS_DFC"},
+ [TSTBUS_TRLUT] = {32, "TSTBUS_TRLUT"},
+ [TSTBUS_TMRLUT] = {32, "TSTBUS_TMRLUT"},
+ [TSTBUS_OCSC] = {32, "TSTBUS_OCSC"},
+ [TSTBUS_UTP_HCI] = {32, "TSTBUS_UTP_HCI"},
+ [TSTBUS_COMBINED] = {32, "TSTBUS_COMBINED"},
+ [TSTBUS_WRAPPER] = {32, "TSTBUS_WRAPPER"},
+ [TSTBUS_UNIPRO] = {256, "TSTBUS_UNIPRO"},
+};
+
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, unsigned long freq);
@@ -173,7 +192,7 @@ static int ufs_qcom_ice_init(struct ufs_qcom_host *host)
profile->ll_ops = ufs_qcom_crypto_ops;
profile->max_dun_bytes_supported = 8;
- profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW;
+ profile->key_types_supported = qcom_ice_get_supported_key_type(ice);
profile->dev = dev;
/*
@@ -221,17 +240,8 @@ static int ufs_qcom_ice_keyslot_program(struct blk_crypto_profile *profile,
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
int err;
- /* Only AES-256-XTS has been tested so far. */
- if (key->crypto_cfg.crypto_mode != BLK_ENCRYPTION_MODE_AES_256_XTS)
- return -EOPNOTSUPP;
-
ufshcd_hold(hba);
- err = qcom_ice_program_key(host->ice,
- QCOM_ICE_CRYPTO_ALG_AES_XTS,
- QCOM_ICE_CRYPTO_KEY_SIZE_256,
- key->bytes,
- key->crypto_cfg.data_unit_size / 512,
- slot);
+ err = qcom_ice_program_key(host->ice, slot, key);
ufshcd_release(hba);
return err;
}
@@ -250,9 +260,53 @@ static int ufs_qcom_ice_keyslot_evict(struct blk_crypto_profile *profile,
return err;
}
+static int ufs_qcom_ice_derive_sw_secret(struct blk_crypto_profile *profile,
+ const u8 *eph_key, size_t eph_key_size,
+ u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE])
+{
+ struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ return qcom_ice_derive_sw_secret(host->ice, eph_key, eph_key_size,
+ sw_secret);
+}
+
+static int ufs_qcom_ice_import_key(struct blk_crypto_profile *profile,
+ const u8 *raw_key, size_t raw_key_size,
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+ struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ return qcom_ice_import_key(host->ice, raw_key, raw_key_size, lt_key);
+}
+
+static int ufs_qcom_ice_generate_key(struct blk_crypto_profile *profile,
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+ struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ return qcom_ice_generate_key(host->ice, lt_key);
+}
+
+static int ufs_qcom_ice_prepare_key(struct blk_crypto_profile *profile,
+ const u8 *lt_key, size_t lt_key_size,
+ u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+ struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ return qcom_ice_prepare_key(host->ice, lt_key, lt_key_size, eph_key);
+}
+
static const struct blk_crypto_ll_ops ufs_qcom_crypto_ops = {
.keyslot_program = ufs_qcom_ice_keyslot_program,
.keyslot_evict = ufs_qcom_ice_keyslot_evict,
+ .derive_sw_secret = ufs_qcom_ice_derive_sw_secret,
+ .import_key = ufs_qcom_ice_import_key,
+ .generate_key = ufs_qcom_ice_generate_key,
+ .prepare_key = ufs_qcom_ice_prepare_key,
};
#else
@@ -1609,6 +1663,85 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
return 0;
}
+static void ufs_qcom_dump_testbus(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ int i, j, nminor = 0, testbus_len = 0;
+ u32 *testbus __free(kfree) = NULL;
+ char *prefix;
+
+ testbus = kmalloc_array(256, sizeof(u32), GFP_KERNEL);
+ if (!testbus)
+ return;
+
+ for (j = 0; j < TSTBUS_MAX; j++) {
+ nminor = testbus_info[j].nminor;
+ prefix = testbus_info[j].prefix;
+ host->testbus.select_major = j;
+ testbus_len = nminor * sizeof(u32);
+ for (i = 0; i < nminor; i++) {
+ host->testbus.select_minor = i;
+ ufs_qcom_testbus_config(host);
+ testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS);
+ }
+ print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
+ 16, 4, testbus, testbus_len, false);
+ }
+}
+
+static int ufs_qcom_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
+ const char *prefix, enum ufshcd_res id)
+{
+ u32 *regs __free(kfree) = NULL;
+ size_t pos;
+
+ if (offset % 4 != 0 || len % 4 != 0)
+ return -EINVAL;
+
+ regs = kzalloc(len, GFP_ATOMIC);
+ if (!regs)
+ return -ENOMEM;
+
+ for (pos = 0; pos < len; pos += 4)
+ regs[pos / 4] = readl(hba->res[id].base + offset + pos);
+
+ print_hex_dump(KERN_ERR, prefix,
+ len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
+ 16, 4, regs, len, false);
+
+ return 0;
+}
+
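[Editor's note — illustrative sketch, not part of the patch.] The two dump helpers above rely on the scope-based cleanup support from <linux/cleanup.h>, whose include is added earlier in this patch: a pointer annotated with __free(kfree) is freed automatically when it goes out of scope, so no explicit kfree() is needed on either the error or the success path. A minimal sketch of the idiom, with a hypothetical helper name:

#include <linux/cleanup.h>
#include <linux/slab.h>

/* Hypothetical helper; only the __free(kfree) idiom matters here. */
static int foo_dump_block(size_t len)
{
	u32 *buf __free(kfree) = kzalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* ... fill buf and print_hex_dump() it ... */

	return 0;	/* buf is kfree()d automatically on scope exit */
}

Initialising the annotated pointer at its declaration (or to NULL first, as the helpers above do before the later allocation) keeps the automatic cleanup well-defined on every early return.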
+static void ufs_qcom_dump_mcq_hci_regs(struct ufs_hba *hba)
+{
+ struct dump_info {
+ size_t offset;
+ size_t len;
+ const char *prefix;
+ enum ufshcd_res id;
+ };
+
+ struct dump_info mcq_dumps[] = {
+ {0x0, 256 * 4, "MCQ HCI-0 ", RES_MCQ},
+ {0x400, 256 * 4, "MCQ HCI-1 ", RES_MCQ},
+ {0x0, 5 * 4, "MCQ VS-0 ", RES_MCQ_VS},
+ {0x0, 256 * 4, "MCQ SQD-0 ", RES_MCQ_SQD},
+ {0x400, 256 * 4, "MCQ SQD-1 ", RES_MCQ_SQD},
+ {0x800, 256 * 4, "MCQ SQD-2 ", RES_MCQ_SQD},
+ {0xc00, 256 * 4, "MCQ SQD-3 ", RES_MCQ_SQD},
+ {0x1000, 256 * 4, "MCQ SQD-4 ", RES_MCQ_SQD},
+ {0x1400, 256 * 4, "MCQ SQD-5 ", RES_MCQ_SQD},
+ {0x1800, 256 * 4, "MCQ SQD-6 ", RES_MCQ_SQD},
+ {0x1c00, 256 * 4, "MCQ SQD-7 ", RES_MCQ_SQD},
+ };
+
+ for (int i = 0; i < ARRAY_SIZE(mcq_dumps); i++) {
+ ufs_qcom_dump_regs(hba, mcq_dumps[i].offset, mcq_dumps[i].len,
+ mcq_dumps[i].prefix, mcq_dumps[i].id);
+ cond_resched();
+ }
+}
+
static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
{
u32 reg;
@@ -1616,6 +1749,15 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
host = ufshcd_get_variant(hba);
+ dev_err(hba->dev, "HW_H8_ENTER_CNT=%d\n", ufshcd_readl(hba, REG_UFS_HW_H8_ENTER_CNT));
+ dev_err(hba->dev, "HW_H8_EXIT_CNT=%d\n", ufshcd_readl(hba, REG_UFS_HW_H8_EXIT_CNT));
+
+ dev_err(hba->dev, "SW_H8_ENTER_CNT=%d\n", ufshcd_readl(hba, REG_UFS_SW_H8_ENTER_CNT));
+ dev_err(hba->dev, "SW_H8_EXIT_CNT=%d\n", ufshcd_readl(hba, REG_UFS_SW_H8_EXIT_CNT));
+
+ dev_err(hba->dev, "SW_AFTER_HW_H8_ENTER_CNT=%d\n",
+ ufshcd_readl(hba, REG_UFS_SW_AFTER_HW_H8_ENTER_CNT));
+
ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
"HCI Vendor Specific Registers ");
@@ -1658,6 +1800,23 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
ufshcd_dump_regs(hba, reg, 9 * 4, "UFS_DBG_RD_REG_TMRLUT ");
+
+ if (hba->mcq_enabled) {
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_RD_REG_MCQ);
+ ufshcd_dump_regs(hba, reg, 64 * 4, "HCI MCQ Debug Registers ");
+ }
+
+ /* Ensure the dumps below run only in task context, since they make blocking calls. */
+ if (in_task()) {
+ /* Dump MCQ Host Vendor Specific Registers */
+ if (hba->mcq_enabled)
+ ufs_qcom_dump_mcq_hci_regs(hba);
+
+ /* voluntarily yield the CPU as we are dumping too much data */
+ ufshcd_dump_regs(hba, UFS_TEST_BUS, 4, "UFS_TEST_BUS ");
+ cond_resched();
+ ufs_qcom_dump_testbus(hba);
+ }
}
/**
diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
index 05d4cb569c50..0a5cfc2dd4f7 100644
--- a/drivers/ufs/host/ufs-qcom.h
+++ b/drivers/ufs/host/ufs-qcom.h
@@ -50,6 +50,8 @@ enum {
*/
UFS_AH8_CFG = 0xFC,
+ UFS_RD_REG_MCQ = 0xD00,
+
REG_UFS_MEM_ICE_CONFIG = 0x260C,
REG_UFS_MEM_ICE_NUM_CORE = 0x2664,
@@ -75,6 +77,15 @@ enum {
UFS_UFS_DBG_RD_EDTL_RAM = 0x1900,
};
+/* QCOM UFS HC vendor specific Hibern8 count registers */
+enum {
+ REG_UFS_HW_H8_ENTER_CNT = 0x2700,
+ REG_UFS_SW_H8_ENTER_CNT = 0x2704,
+ REG_UFS_SW_AFTER_HW_H8_ENTER_CNT = 0x2708,
+ REG_UFS_HW_H8_EXIT_CNT = 0x270C,
+ REG_UFS_SW_H8_EXIT_CNT = 0x2710,
+};
+
enum {
UFS_MEM_CQIS_VS = 0x8,
};
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
index 451c639299eb..2149f49aeec7 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
@@ -190,9 +190,10 @@ static int qm_set_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
int ret;
/* Check VF state */
- if (unlikely(hisi_qm_wait_mb_ready(qm))) {
+ ret = hisi_qm_wait_mb_ready(qm);
+ if (unlikely(ret)) {
dev_err(&qm->pdev->dev, "QM device is not ready to write\n");
- return -EBUSY;
+ return ret;
}
ret = qm_write_regs(qm, QM_VF_AEQ_INT_MASK, &vf_data->aeq_int_mask, 1);
@@ -325,13 +326,15 @@ static void qm_dev_cmd_init(struct hisi_qm *qm)
static int vf_qm_cache_wb(struct hisi_qm *qm)
{
unsigned int val;
+ int ret;
writel(0x1, qm->io_base + QM_CACHE_WB_START);
- if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
+ ret = readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
val, val & BIT(0), MB_POLL_PERIOD_US,
- MB_POLL_TIMEOUT_US)) {
+ MB_POLL_TIMEOUT_US);
+ if (ret) {
dev_err(&qm->pdev->dev, "vf QM writeback sqc cache fail\n");
- return -EINVAL;
+ return ret;
}
return 0;
@@ -350,6 +353,32 @@ static int vf_qm_func_stop(struct hisi_qm *qm)
return hisi_qm_mb(qm, QM_MB_CMD_PAUSE_QM, 0, 0, 0);
}
+static int vf_qm_version_check(struct acc_vf_data *vf_data, struct device *dev)
+{
+ switch (vf_data->acc_magic) {
+ case ACC_DEV_MAGIC_V2:
+ if (vf_data->major_ver != ACC_DRV_MAJOR_VER) {
+ dev_info(dev, "migration driver version <%u.%u> does not match!\n",
+ vf_data->major_ver, vf_data->minor_ver);
+ return -EINVAL;
+ }
+ break;
+ case ACC_DEV_MAGIC_V1:
+ /* Correct dma address */
+ vf_data->eqe_dma = vf_data->qm_eqc_dw[QM_XQC_ADDR_HIGH];
+ vf_data->eqe_dma <<= QM_XQC_ADDR_OFFSET;
+ vf_data->eqe_dma |= vf_data->qm_eqc_dw[QM_XQC_ADDR_LOW];
+ vf_data->aeqe_dma = vf_data->qm_aeqc_dw[QM_XQC_ADDR_HIGH];
+ vf_data->aeqe_dma <<= QM_XQC_ADDR_OFFSET;
+ vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[QM_XQC_ADDR_LOW];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
struct hisi_acc_vf_migration_file *migf)
{
@@ -363,9 +392,10 @@ static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
if (migf->total_length < QM_MATCH_SIZE || hisi_acc_vdev->match_done)
return 0;
- if (vf_data->acc_magic != ACC_DEV_MAGIC) {
+ ret = vf_qm_version_check(vf_data, dev);
+ if (ret) {
dev_err(dev, "failed to match ACC_DEV_MAGIC\n");
- return -EINVAL;
+ return ret;
}
if (vf_data->dev_id != hisi_acc_vdev->vf_dev->device) {
@@ -377,7 +407,7 @@ static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
ret = qm_get_vft(vf_qm, &vf_qm->qp_base);
if (ret <= 0) {
dev_err(dev, "failed to get vft qp nums\n");
- return -EINVAL;
+ return ret;
}
if (ret != vf_data->qp_num) {
@@ -399,13 +429,6 @@ static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
return -EINVAL;
}
- ret = qm_write_regs(vf_qm, QM_VF_STATE, &vf_data->vf_qm_state, 1);
- if (ret) {
- dev_err(dev, "failed to write QM_VF_STATE\n");
- return ret;
- }
-
- hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
hisi_acc_vdev->match_done = true;
return 0;
}
@@ -418,7 +441,9 @@ static int vf_qm_get_match_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
int vf_id = hisi_acc_vdev->vf_id;
int ret;
- vf_data->acc_magic = ACC_DEV_MAGIC;
+ vf_data->acc_magic = ACC_DEV_MAGIC_V2;
+ vf_data->major_ver = ACC_DRV_MAJOR_VER;
+ vf_data->minor_ver = ACC_DRV_MINOR_VER;
/* Save device id */
vf_data->dev_id = hisi_acc_vdev->vf_dev->device;
@@ -441,6 +466,19 @@ static int vf_qm_get_match_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
return 0;
}
+static void vf_qm_xeqc_save(struct hisi_qm *qm,
+ struct hisi_acc_vf_migration_file *migf)
+{
+ struct acc_vf_data *vf_data = &migf->vf_data;
+ u16 eq_head, aeq_head;
+
+ eq_head = vf_data->qm_eqc_dw[0] & 0xFFFF;
+ qm_db(qm, 0, QM_DOORBELL_CMD_EQ, eq_head, 0);
+
+ aeq_head = vf_data->qm_aeqc_dw[0] & 0xFFFF;
+ qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, aeq_head, 0);
+}
+
static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
struct hisi_acc_vf_migration_file *migf)
{
@@ -456,6 +494,20 @@ static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
if (migf->total_length < sizeof(struct acc_vf_data))
return -EINVAL;
+ if (!vf_data->eqe_dma || !vf_data->aeqe_dma ||
+ !vf_data->sqc_dma || !vf_data->cqc_dma) {
+ dev_info(dev, "resume dma addr is NULL!\n");
+ hisi_acc_vdev->vf_qm_state = QM_NOT_READY;
+ return 0;
+ }
+
+ ret = qm_write_regs(qm, QM_VF_STATE, &vf_data->vf_qm_state, 1);
+ if (ret) {
+ dev_err(dev, "failed to write QM_VF_STATE\n");
+ return ret;
+ }
+ hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
+
qm->eqe_dma = vf_data->eqe_dma;
qm->aeqe_dma = vf_data->aeqe_dma;
qm->sqc_dma = vf_data->sqc_dma;
@@ -493,27 +545,27 @@ static int vf_qm_read_data(struct hisi_qm *vf_qm, struct acc_vf_data *vf_data)
ret = qm_get_regs(vf_qm, vf_data);
if (ret)
- return -EINVAL;
+ return ret;
/* Every reg is 32 bit, the dma address is 64 bit. */
- vf_data->eqe_dma = vf_data->qm_eqc_dw[1];
+ vf_data->eqe_dma = vf_data->qm_eqc_dw[QM_XQC_ADDR_HIGH];
vf_data->eqe_dma <<= QM_XQC_ADDR_OFFSET;
- vf_data->eqe_dma |= vf_data->qm_eqc_dw[0];
- vf_data->aeqe_dma = vf_data->qm_aeqc_dw[1];
+ vf_data->eqe_dma |= vf_data->qm_eqc_dw[QM_XQC_ADDR_LOW];
+ vf_data->aeqe_dma = vf_data->qm_aeqc_dw[QM_XQC_ADDR_HIGH];
vf_data->aeqe_dma <<= QM_XQC_ADDR_OFFSET;
- vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[0];
+ vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[QM_XQC_ADDR_LOW];
/* Through SQC_BT/CQC_BT to get sqc and cqc address */
ret = qm_get_sqc(vf_qm, &vf_data->sqc_dma);
if (ret) {
dev_err(dev, "failed to read SQC addr!\n");
- return -EINVAL;
+ return ret;
}
ret = qm_get_cqc(vf_qm, &vf_data->cqc_dma);
if (ret) {
dev_err(dev, "failed to read CQC addr!\n");
- return -EINVAL;
+ return ret;
}
return 0;
@@ -524,7 +576,6 @@ static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev,
{
struct acc_vf_data *vf_data = &migf->vf_data;
struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
- struct device *dev = &vf_qm->pdev->dev;
int ret;
if (unlikely(qm_wait_dev_not_ready(vf_qm))) {
@@ -538,17 +589,14 @@ static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev,
vf_data->vf_qm_state = QM_READY;
hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
- ret = vf_qm_cache_wb(vf_qm);
- if (ret) {
- dev_err(dev, "failed to writeback QM Cache!\n");
- return ret;
- }
-
ret = vf_qm_read_data(vf_qm, vf_data);
if (ret)
- return -EINVAL;
+ return ret;
migf->total_length = sizeof(struct acc_vf_data);
+ /* Save eqc and aeqc interrupt information */
+ vf_qm_xeqc_save(vf_qm, migf);
+
return 0;
}
@@ -967,6 +1015,13 @@ static int hisi_acc_vf_stop_device(struct hisi_acc_vf_core_device *hisi_acc_vdev
dev_err(dev, "failed to check QM INT state!\n");
return ret;
}
+
+ ret = vf_qm_cache_wb(vf_qm);
+ if (ret) {
+ dev_err(dev, "failed to writeback QM cache!\n");
+ return ret;
+ }
+
return 0;
}
@@ -1327,7 +1382,7 @@ static int hisi_acc_vf_debug_check(struct seq_file *seq, struct vfio_device *vde
ret = qm_wait_dev_not_ready(vf_qm);
if (ret) {
seq_puts(seq, "VF device not ready!\n");
- return -EBUSY;
+ return ret;
}
return 0;
@@ -1463,6 +1518,7 @@ static void hisi_acc_vfio_pci_close_device(struct vfio_device *core_vdev)
struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(core_vdev);
struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
+ hisi_acc_vf_disable_fds(hisi_acc_vdev);
mutex_lock(&hisi_acc_vdev->open_mutex);
hisi_acc_vdev->dev_opened = false;
iounmap(vf_qm->io_base);
@@ -1485,6 +1541,7 @@ static int hisi_acc_vfio_pci_migrn_init_dev(struct vfio_device *core_vdev)
hisi_acc_vdev->vf_id = pci_iov_vf_id(pdev) + 1;
hisi_acc_vdev->pf_qm = pf_qm;
hisi_acc_vdev->vf_dev = pdev;
+ hisi_acc_vdev->vf_qm_state = QM_NOT_READY;
mutex_init(&hisi_acc_vdev->state_mutex);
mutex_init(&hisi_acc_vdev->open_mutex);
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
index 245d7537b2bc..91002ceeebc1 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
@@ -39,6 +39,9 @@
#define QM_REG_ADDR_OFFSET 0x0004
#define QM_XQC_ADDR_OFFSET 32U
+#define QM_XQC_ADDR_LOW 0x1
+#define QM_XQC_ADDR_HIGH 0x2
+
#define QM_VF_AEQ_INT_MASK 0x0004
#define QM_VF_EQ_INT_MASK 0x000c
#define QM_IFC_INT_SOURCE_V 0x0020
@@ -50,10 +53,15 @@
#define QM_EQC_DW0 0X8000
#define QM_AEQC_DW0 0X8020
+#define ACC_DRV_MAJOR_VER 1
+#define ACC_DRV_MINOR_VER 0
+
+#define ACC_DEV_MAGIC_V1 0XCDCDCDCDFEEDAACC
+#define ACC_DEV_MAGIC_V2 0xAACCFEEDDECADEDE
+
struct acc_vf_data {
#define QM_MATCH_SIZE offsetofend(struct acc_vf_data, qm_rsv_state)
/* QM match information */
-#define ACC_DEV_MAGIC 0XCDCDCDCDFEEDAACC
u64 acc_magic;
u32 qp_num;
u32 dev_id;
@@ -61,7 +69,9 @@ struct acc_vf_data {
u32 qp_base;
u32 vf_qm_state;
/* QM reserved match information */
- u32 qm_rsv_state[3];
+ u16 major_ver;
+ u16 minor_ver;
+ u32 qm_rsv_state[2];
/* QM RW regs */
u32 aeq_int_mask;
diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c
index 11eda6b207f1..5b919a0b2524 100644
--- a/drivers/vfio/pci/mlx5/cmd.c
+++ b/drivers/vfio/pci/mlx5/cmd.c
@@ -313,40 +313,21 @@ err_exec:
return ret;
}
-static int _create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
- struct mlx5_vhca_data_buffer *buf,
- struct mlx5_vhca_recv_buf *recv_buf,
- u32 *mkey)
+static u32 *alloc_mkey_in(u32 npages, u32 pdn)
{
- size_t npages = buf ? DIV_ROUND_UP(buf->allocated_length, PAGE_SIZE) :
- recv_buf->npages;
- int err = 0, inlen;
- __be64 *mtt;
+ int inlen;
void *mkc;
u32 *in;
inlen = MLX5_ST_SZ_BYTES(create_mkey_in) +
- sizeof(*mtt) * round_up(npages, 2);
+ sizeof(__be64) * round_up(npages, 2);
- in = kvzalloc(inlen, GFP_KERNEL);
+ in = kvzalloc(inlen, GFP_KERNEL_ACCOUNT);
if (!in)
- return -ENOMEM;
+ return NULL;
MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
DIV_ROUND_UP(npages, 2));
- mtt = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
-
- if (buf) {
- struct sg_dma_page_iter dma_iter;
-
- for_each_sgtable_dma_page(&buf->table.sgt, &dma_iter, 0)
- *mtt++ = cpu_to_be64(sg_page_iter_dma_address(&dma_iter));
- } else {
- int i;
-
- for (i = 0; i < npages; i++)
- *mtt++ = cpu_to_be64(recv_buf->dma_addrs[i]);
- }
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
@@ -360,8 +341,81 @@ static int _create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
MLX5_SET(mkc, mkc, translations_octword_size, DIV_ROUND_UP(npages, 2));
MLX5_SET64(mkc, mkc, len, npages * PAGE_SIZE);
- err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
- kvfree(in);
+
+ return in;
+}
+
+static int create_mkey(struct mlx5_core_dev *mdev, u32 npages, u32 *mkey_in,
+ u32 *mkey)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in) +
+ sizeof(__be64) * round_up(npages, 2);
+
+ return mlx5_core_create_mkey(mdev, mkey, mkey_in, inlen);
+}
+
+static void unregister_dma_pages(struct mlx5_core_dev *mdev, u32 npages,
+ u32 *mkey_in, struct dma_iova_state *state,
+ enum dma_data_direction dir)
+{
+ dma_addr_t addr;
+ __be64 *mtt;
+ int i;
+
+ if (dma_use_iova(state)) {
+ dma_iova_destroy(mdev->device, state, npages * PAGE_SIZE, dir,
+ 0);
+ } else {
+ mtt = (__be64 *)MLX5_ADDR_OF(create_mkey_in, mkey_in,
+ klm_pas_mtt);
+ for (i = npages - 1; i >= 0; i--) {
+ addr = be64_to_cpu(mtt[i]);
+ dma_unmap_page(mdev->device, addr, PAGE_SIZE, dir);
+ }
+ }
+}
+
+static int register_dma_pages(struct mlx5_core_dev *mdev, u32 npages,
+ struct page **page_list, u32 *mkey_in,
+ struct dma_iova_state *state,
+ enum dma_data_direction dir)
+{
+ dma_addr_t addr;
+ size_t mapped = 0;
+ __be64 *mtt;
+ int i, err;
+
+ mtt = (__be64 *)MLX5_ADDR_OF(create_mkey_in, mkey_in, klm_pas_mtt);
+
+ if (dma_iova_try_alloc(mdev->device, state, 0, npages * PAGE_SIZE)) {
+ addr = state->addr;
+ for (i = 0; i < npages; i++) {
+ err = dma_iova_link(mdev->device, state,
+ page_to_phys(page_list[i]), mapped,
+ PAGE_SIZE, dir, 0);
+ if (err)
+ goto error;
+ *mtt++ = cpu_to_be64(addr);
+ addr += PAGE_SIZE;
+ mapped += PAGE_SIZE;
+ }
+ err = dma_iova_sync(mdev->device, state, 0, mapped);
+ if (err)
+ goto error;
+ } else {
+ for (i = 0; i < npages; i++) {
+ addr = dma_map_page(mdev->device, page_list[i], 0,
+ PAGE_SIZE, dir);
+ err = dma_mapping_error(mdev->device, addr);
+ if (err)
+ goto error;
+ *mtt++ = cpu_to_be64(addr);
+ }
+ }
+ return 0;
+
+error:
+ unregister_dma_pages(mdev, i, mkey_in, state, dir);
return err;
}
@@ -375,97 +429,97 @@ static int mlx5vf_dma_data_buffer(struct mlx5_vhca_data_buffer *buf)
if (mvdev->mdev_detach)
return -ENOTCONN;
- if (buf->dmaed || !buf->allocated_length)
+ if (buf->mkey_in || !buf->npages)
return -EINVAL;
- ret = dma_map_sgtable(mdev->device, &buf->table.sgt, buf->dma_dir, 0);
- if (ret)
- return ret;
+ buf->mkey_in = alloc_mkey_in(buf->npages, buf->migf->pdn);
+ if (!buf->mkey_in)
+ return -ENOMEM;
- ret = _create_mkey(mdev, buf->migf->pdn, buf, NULL, &buf->mkey);
+ ret = register_dma_pages(mdev, buf->npages, buf->page_list,
+ buf->mkey_in, &buf->state, buf->dma_dir);
if (ret)
- goto err;
+ goto err_register_dma;
- buf->dmaed = true;
+ ret = create_mkey(mdev, buf->npages, buf->mkey_in, &buf->mkey);
+ if (ret)
+ goto err_create_mkey;
return 0;
-err:
- dma_unmap_sgtable(mdev->device, &buf->table.sgt, buf->dma_dir, 0);
+
+err_create_mkey:
+ unregister_dma_pages(mdev, buf->npages, buf->mkey_in, &buf->state,
+ buf->dma_dir);
+err_register_dma:
+ kvfree(buf->mkey_in);
+ buf->mkey_in = NULL;
return ret;
}
+static void free_page_list(u32 npages, struct page **page_list)
+{
+ int i;
+
+ /* Undo alloc_pages_bulk() */
+ for (i = npages - 1; i >= 0; i--)
+ __free_page(page_list[i]);
+
+ kvfree(page_list);
+}
+
void mlx5vf_free_data_buffer(struct mlx5_vhca_data_buffer *buf)
{
- struct mlx5_vf_migration_file *migf = buf->migf;
- struct sg_page_iter sg_iter;
+ struct mlx5vf_pci_core_device *mvdev = buf->migf->mvdev;
+ struct mlx5_core_dev *mdev = mvdev->mdev;
- lockdep_assert_held(&migf->mvdev->state_mutex);
- WARN_ON(migf->mvdev->mdev_detach);
+ lockdep_assert_held(&mvdev->state_mutex);
+ WARN_ON(mvdev->mdev_detach);
- if (buf->dmaed) {
- mlx5_core_destroy_mkey(migf->mvdev->mdev, buf->mkey);
- dma_unmap_sgtable(migf->mvdev->mdev->device, &buf->table.sgt,
- buf->dma_dir, 0);
+ if (buf->mkey_in) {
+ mlx5_core_destroy_mkey(mdev, buf->mkey);
+ unregister_dma_pages(mdev, buf->npages, buf->mkey_in,
+ &buf->state, buf->dma_dir);
+ kvfree(buf->mkey_in);
}
- /* Undo alloc_pages_bulk() */
- for_each_sgtable_page(&buf->table.sgt, &sg_iter, 0)
- __free_page(sg_page_iter_page(&sg_iter));
- sg_free_append_table(&buf->table);
+ free_page_list(buf->npages, buf->page_list);
kfree(buf);
}
-static int mlx5vf_add_migration_pages(struct mlx5_vhca_data_buffer *buf,
- unsigned int npages)
+static int mlx5vf_add_pages(struct page ***page_list, unsigned int npages)
{
- unsigned int to_alloc = npages;
- struct page **page_list;
- unsigned long filled;
- unsigned int to_fill;
- int ret;
+ unsigned int filled, done = 0;
int i;
- to_fill = min_t(unsigned int, npages, PAGE_SIZE / sizeof(*page_list));
- page_list = kvzalloc(to_fill * sizeof(*page_list), GFP_KERNEL_ACCOUNT);
- if (!page_list)
+ *page_list =
+ kvcalloc(npages, sizeof(struct page *), GFP_KERNEL_ACCOUNT);
+ if (!*page_list)
return -ENOMEM;
- do {
- filled = alloc_pages_bulk(GFP_KERNEL_ACCOUNT, to_fill,
- page_list);
- if (!filled) {
- ret = -ENOMEM;
+ for (;;) {
+ filled = alloc_pages_bulk(GFP_KERNEL_ACCOUNT, npages - done,
+ *page_list + done);
+ if (!filled)
goto err;
- }
- to_alloc -= filled;
- ret = sg_alloc_append_table_from_pages(
- &buf->table, page_list, filled, 0,
- filled << PAGE_SHIFT, UINT_MAX, SG_MAX_SINGLE_ALLOC,
- GFP_KERNEL_ACCOUNT);
- if (ret)
- goto err_append;
- buf->allocated_length += filled * PAGE_SIZE;
- /* clean input for another bulk allocation */
- memset(page_list, 0, filled * sizeof(*page_list));
- to_fill = min_t(unsigned int, to_alloc,
- PAGE_SIZE / sizeof(*page_list));
- } while (to_alloc > 0);
+ done += filled;
+ if (done == npages)
+ break;
+ }
- kvfree(page_list);
return 0;
-err_append:
- for (i = filled - 1; i >= 0; i--)
- __free_page(page_list[i]);
err:
- kvfree(page_list);
- return ret;
+ for (i = 0; i < done; i++)
+ __free_page((*page_list)[i]);
+
+ kvfree(*page_list);
+ *page_list = NULL;
+ return -ENOMEM;
}
struct mlx5_vhca_data_buffer *
-mlx5vf_alloc_data_buffer(struct mlx5_vf_migration_file *migf,
- size_t length,
+mlx5vf_alloc_data_buffer(struct mlx5_vf_migration_file *migf, u32 npages,
enum dma_data_direction dma_dir)
{
struct mlx5_vhca_data_buffer *buf;
@@ -477,12 +531,13 @@ mlx5vf_alloc_data_buffer(struct mlx5_vf_migration_file *migf,
buf->dma_dir = dma_dir;
buf->migf = migf;
- if (length) {
- ret = mlx5vf_add_migration_pages(buf,
- DIV_ROUND_UP_ULL(length, PAGE_SIZE));
+ if (npages) {
+ ret = mlx5vf_add_pages(&buf->page_list, npages);
if (ret)
goto end;
+ buf->npages = npages;
+
if (dma_dir != DMA_NONE) {
ret = mlx5vf_dma_data_buffer(buf);
if (ret)
@@ -505,8 +560,8 @@ void mlx5vf_put_data_buffer(struct mlx5_vhca_data_buffer *buf)
}
struct mlx5_vhca_data_buffer *
-mlx5vf_get_data_buffer(struct mlx5_vf_migration_file *migf,
- size_t length, enum dma_data_direction dma_dir)
+mlx5vf_get_data_buffer(struct mlx5_vf_migration_file *migf, u32 npages,
+ enum dma_data_direction dma_dir)
{
struct mlx5_vhca_data_buffer *buf, *temp_buf;
struct list_head free_list;
@@ -521,7 +576,7 @@ mlx5vf_get_data_buffer(struct mlx5_vf_migration_file *migf,
list_for_each_entry_safe(buf, temp_buf, &migf->avail_list, buf_elm) {
if (buf->dma_dir == dma_dir) {
list_del_init(&buf->buf_elm);
- if (buf->allocated_length >= length) {
+ if (buf->npages >= npages) {
spin_unlock_irq(&migf->list_lock);
goto found;
}
@@ -535,7 +590,7 @@ mlx5vf_get_data_buffer(struct mlx5_vf_migration_file *migf,
}
}
spin_unlock_irq(&migf->list_lock);
- buf = mlx5vf_alloc_data_buffer(migf, length, dma_dir);
+ buf = mlx5vf_alloc_data_buffer(migf, npages, dma_dir);
found:
while ((temp_buf = list_first_entry_or_null(&free_list,
@@ -716,7 +771,7 @@ int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
MLX5_SET(save_vhca_state_in, in, op_mod, 0);
MLX5_SET(save_vhca_state_in, in, vhca_id, mvdev->vhca_id);
MLX5_SET(save_vhca_state_in, in, mkey, buf->mkey);
- MLX5_SET(save_vhca_state_in, in, size, buf->allocated_length);
+ MLX5_SET(save_vhca_state_in, in, size, buf->npages * PAGE_SIZE);
MLX5_SET(save_vhca_state_in, in, incremental, inc);
MLX5_SET(save_vhca_state_in, in, set_track, track);
@@ -738,8 +793,11 @@ int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
}
if (!header_buf) {
- header_buf = mlx5vf_get_data_buffer(migf,
- sizeof(struct mlx5_vf_migration_header), DMA_NONE);
+ header_buf = mlx5vf_get_data_buffer(
+ migf,
+ DIV_ROUND_UP(sizeof(struct mlx5_vf_migration_header),
+ PAGE_SIZE),
+ DMA_NONE);
if (IS_ERR(header_buf)) {
err = PTR_ERR(header_buf);
goto err_free;
@@ -783,7 +841,7 @@ int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev,
if (mvdev->mdev_detach)
return -ENOTCONN;
- if (!buf->dmaed) {
+ if (!buf->mkey_in) {
err = mlx5vf_dma_data_buffer(buf);
if (err)
return err;
@@ -1338,103 +1396,16 @@ static void mlx5vf_destroy_qp(struct mlx5_core_dev *mdev,
kfree(qp);
}
-static void free_recv_pages(struct mlx5_vhca_recv_buf *recv_buf)
-{
- int i;
-
- /* Undo alloc_pages_bulk() */
- for (i = 0; i < recv_buf->npages; i++)
- __free_page(recv_buf->page_list[i]);
-
- kvfree(recv_buf->page_list);
-}
-
-static int alloc_recv_pages(struct mlx5_vhca_recv_buf *recv_buf,
- unsigned int npages)
-{
- unsigned int filled = 0, done = 0;
- int i;
-
- recv_buf->page_list = kvcalloc(npages, sizeof(*recv_buf->page_list),
- GFP_KERNEL_ACCOUNT);
- if (!recv_buf->page_list)
- return -ENOMEM;
-
- for (;;) {
- filled = alloc_pages_bulk(GFP_KERNEL_ACCOUNT,
- npages - done,
- recv_buf->page_list + done);
- if (!filled)
- goto err;
-
- done += filled;
- if (done == npages)
- break;
- }
-
- recv_buf->npages = npages;
- return 0;
-
-err:
- for (i = 0; i < npages; i++) {
- if (recv_buf->page_list[i])
- __free_page(recv_buf->page_list[i]);
- }
-
- kvfree(recv_buf->page_list);
- return -ENOMEM;
-}
-
-static int register_dma_recv_pages(struct mlx5_core_dev *mdev,
- struct mlx5_vhca_recv_buf *recv_buf)
-{
- int i, j;
-
- recv_buf->dma_addrs = kvcalloc(recv_buf->npages,
- sizeof(*recv_buf->dma_addrs),
- GFP_KERNEL_ACCOUNT);
- if (!recv_buf->dma_addrs)
- return -ENOMEM;
-
- for (i = 0; i < recv_buf->npages; i++) {
- recv_buf->dma_addrs[i] = dma_map_page(mdev->device,
- recv_buf->page_list[i],
- 0, PAGE_SIZE,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(mdev->device, recv_buf->dma_addrs[i]))
- goto error;
- }
- return 0;
-
-error:
- for (j = 0; j < i; j++)
- dma_unmap_single(mdev->device, recv_buf->dma_addrs[j],
- PAGE_SIZE, DMA_FROM_DEVICE);
-
- kvfree(recv_buf->dma_addrs);
- return -ENOMEM;
-}
-
-static void unregister_dma_recv_pages(struct mlx5_core_dev *mdev,
- struct mlx5_vhca_recv_buf *recv_buf)
-{
- int i;
-
- for (i = 0; i < recv_buf->npages; i++)
- dma_unmap_single(mdev->device, recv_buf->dma_addrs[i],
- PAGE_SIZE, DMA_FROM_DEVICE);
-
- kvfree(recv_buf->dma_addrs);
-}
-
static void mlx5vf_free_qp_recv_resources(struct mlx5_core_dev *mdev,
struct mlx5_vhca_qp *qp)
{
struct mlx5_vhca_recv_buf *recv_buf = &qp->recv_buf;
mlx5_core_destroy_mkey(mdev, recv_buf->mkey);
- unregister_dma_recv_pages(mdev, recv_buf);
- free_recv_pages(&qp->recv_buf);
+ unregister_dma_pages(mdev, recv_buf->npages, recv_buf->mkey_in,
+ &recv_buf->state, DMA_FROM_DEVICE);
+ kvfree(recv_buf->mkey_in);
+ free_page_list(recv_buf->npages, recv_buf->page_list);
}
static int mlx5vf_alloc_qp_recv_resources(struct mlx5_core_dev *mdev,
@@ -1445,24 +1416,38 @@ static int mlx5vf_alloc_qp_recv_resources(struct mlx5_core_dev *mdev,
struct mlx5_vhca_recv_buf *recv_buf = &qp->recv_buf;
int err;
- err = alloc_recv_pages(recv_buf, npages);
- if (err < 0)
+ err = mlx5vf_add_pages(&recv_buf->page_list, npages);
+ if (err)
return err;
- err = register_dma_recv_pages(mdev, recv_buf);
- if (err)
+ recv_buf->npages = npages;
+
+ recv_buf->mkey_in = alloc_mkey_in(npages, pdn);
+ if (!recv_buf->mkey_in) {
+ err = -ENOMEM;
goto end;
+ }
+
+ err = register_dma_pages(mdev, npages, recv_buf->page_list,
+ recv_buf->mkey_in, &recv_buf->state,
+ DMA_FROM_DEVICE);
+ if (err)
+ goto err_register_dma;
- err = _create_mkey(mdev, pdn, NULL, recv_buf, &recv_buf->mkey);
+ err = create_mkey(mdev, npages, recv_buf->mkey_in, &recv_buf->mkey);
if (err)
goto err_create_mkey;
return 0;
err_create_mkey:
- unregister_dma_recv_pages(mdev, recv_buf);
+ unregister_dma_pages(mdev, npages, recv_buf->mkey_in, &recv_buf->state,
+ DMA_FROM_DEVICE);
+err_register_dma:
+ kvfree(recv_buf->mkey_in);
+ recv_buf->mkey_in = NULL;
end:
- free_recv_pages(recv_buf);
+ free_page_list(npages, recv_buf->page_list);
return err;
}
diff --git a/drivers/vfio/pci/mlx5/cmd.h b/drivers/vfio/pci/mlx5/cmd.h
index df421dc6de04..d7821b5ca772 100644
--- a/drivers/vfio/pci/mlx5/cmd.h
+++ b/drivers/vfio/pci/mlx5/cmd.h
@@ -53,20 +53,17 @@ struct mlx5_vf_migration_header {
};
struct mlx5_vhca_data_buffer {
- struct sg_append_table table;
+ struct page **page_list;
+ struct dma_iova_state state;
loff_t start_pos;
u64 length;
- u64 allocated_length;
+ u32 npages;
u32 mkey;
+ u32 *mkey_in;
enum dma_data_direction dma_dir;
- u8 dmaed:1;
u8 stop_copy_chunk_num;
struct list_head buf_elm;
struct mlx5_vf_migration_file *migf;
- /* Optimize mlx5vf_get_migration_page() for sequential access */
- struct scatterlist *last_offset_sg;
- unsigned int sg_last_entry;
- unsigned long last_offset;
};
struct mlx5vf_async_data {
@@ -133,8 +130,9 @@ struct mlx5_vhca_cq {
struct mlx5_vhca_recv_buf {
u32 npages;
struct page **page_list;
- dma_addr_t *dma_addrs;
+ struct dma_iova_state state;
u32 next_rq_offset;
+ u32 *mkey_in;
u32 mkey;
};
@@ -217,15 +215,24 @@ int mlx5vf_cmd_alloc_pd(struct mlx5_vf_migration_file *migf);
void mlx5vf_cmd_dealloc_pd(struct mlx5_vf_migration_file *migf);
void mlx5fv_cmd_clean_migf_resources(struct mlx5_vf_migration_file *migf);
struct mlx5_vhca_data_buffer *
-mlx5vf_alloc_data_buffer(struct mlx5_vf_migration_file *migf,
- size_t length, enum dma_data_direction dma_dir);
+mlx5vf_alloc_data_buffer(struct mlx5_vf_migration_file *migf, u32 npages,
+ enum dma_data_direction dma_dir);
void mlx5vf_free_data_buffer(struct mlx5_vhca_data_buffer *buf);
struct mlx5_vhca_data_buffer *
-mlx5vf_get_data_buffer(struct mlx5_vf_migration_file *migf,
- size_t length, enum dma_data_direction dma_dir);
+mlx5vf_get_data_buffer(struct mlx5_vf_migration_file *migf, u32 npages,
+ enum dma_data_direction dma_dir);
void mlx5vf_put_data_buffer(struct mlx5_vhca_data_buffer *buf);
-struct page *mlx5vf_get_migration_page(struct mlx5_vhca_data_buffer *buf,
- unsigned long offset);
+static inline struct page *
+mlx5vf_get_migration_page(struct mlx5_vhca_data_buffer *buf,
+ unsigned long offset)
+{
+ int page_entry = offset / PAGE_SIZE;
+
+ if (page_entry >= buf->npages)
+ return NULL;
+
+ return buf->page_list[page_entry];
+}
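[Editor's note — illustrative sketch, not part of the patch.] With the scatterlist walk gone, migration-buffer pages are now found by plain index math on page_list. As a usage sketch (the foo_* helper is hypothetical), a caller copying into the buffer at a byte offset would look roughly like this, assuming the copy does not cross a page boundary:

static int foo_copy_to_buf(struct mlx5_vhca_data_buffer *vhca_buf,
			   const void *src, size_t len, loff_t offset)
{
	struct page *page = mlx5vf_get_migration_page(vhca_buf, offset);
	void *to;

	/* Reject offsets past the buffer and copies spanning a page boundary. */
	if (!page || offset_in_page(offset) + len > PAGE_SIZE)
		return -EINVAL;

	to = kmap_local_page(page);
	memcpy(to + offset_in_page(offset), src, len);
	kunmap_local(to);
	return 0;
}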
void mlx5vf_state_mutex_unlock(struct mlx5vf_pci_core_device *mvdev);
void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev,
enum mlx5_vf_migf_state *last_save_state);
diff --git a/drivers/vfio/pci/mlx5/main.c b/drivers/vfio/pci/mlx5/main.c
index 709543e7eb04..93f894fe60d2 100644
--- a/drivers/vfio/pci/mlx5/main.c
+++ b/drivers/vfio/pci/mlx5/main.c
@@ -34,37 +34,6 @@ static struct mlx5vf_pci_core_device *mlx5vf_drvdata(struct pci_dev *pdev)
core_device);
}
-struct page *
-mlx5vf_get_migration_page(struct mlx5_vhca_data_buffer *buf,
- unsigned long offset)
-{
- unsigned long cur_offset = 0;
- struct scatterlist *sg;
- unsigned int i;
-
- /* All accesses are sequential */
- if (offset < buf->last_offset || !buf->last_offset_sg) {
- buf->last_offset = 0;
- buf->last_offset_sg = buf->table.sgt.sgl;
- buf->sg_last_entry = 0;
- }
-
- cur_offset = buf->last_offset;
-
- for_each_sg(buf->last_offset_sg, sg,
- buf->table.sgt.orig_nents - buf->sg_last_entry, i) {
- if (offset < sg->length + cur_offset) {
- buf->last_offset_sg = sg;
- buf->sg_last_entry += i;
- buf->last_offset = cur_offset;
- return nth_page(sg_page(sg),
- (offset - cur_offset) / PAGE_SIZE);
- }
- cur_offset += sg->length;
- }
- return NULL;
-}
-
static void mlx5vf_disable_fd(struct mlx5_vf_migration_file *migf)
{
mutex_lock(&migf->lock);
@@ -308,6 +277,7 @@ static struct mlx5_vhca_data_buffer *
mlx5vf_mig_file_get_stop_copy_buf(struct mlx5_vf_migration_file *migf,
u8 index, size_t required_length)
{
+ u32 npages = DIV_ROUND_UP(required_length, PAGE_SIZE);
struct mlx5_vhca_data_buffer *buf = migf->buf[index];
u8 chunk_num;
@@ -315,12 +285,11 @@ mlx5vf_mig_file_get_stop_copy_buf(struct mlx5_vf_migration_file *migf,
chunk_num = buf->stop_copy_chunk_num;
buf->migf->buf[index] = NULL;
/* Checking whether the pre-allocated buffer can fit */
- if (buf->allocated_length >= required_length)
+ if (buf->npages >= npages)
return buf;
mlx5vf_put_data_buffer(buf);
- buf = mlx5vf_get_data_buffer(buf->migf, required_length,
- DMA_FROM_DEVICE);
+ buf = mlx5vf_get_data_buffer(buf->migf, npages, DMA_FROM_DEVICE);
if (IS_ERR(buf))
return buf;
@@ -373,7 +342,8 @@ static int mlx5vf_add_stop_copy_header(struct mlx5_vf_migration_file *migf,
u8 *to_buff;
int ret;
- header_buf = mlx5vf_get_data_buffer(migf, size, DMA_NONE);
+ header_buf = mlx5vf_get_data_buffer(migf, DIV_ROUND_UP(size, PAGE_SIZE),
+ DMA_NONE);
if (IS_ERR(header_buf))
return PTR_ERR(header_buf);
@@ -388,7 +358,7 @@ static int mlx5vf_add_stop_copy_header(struct mlx5_vf_migration_file *migf,
to_buff = kmap_local_page(page);
memcpy(to_buff, &header, sizeof(header));
header_buf->length = sizeof(header);
- data.stop_copy_size = cpu_to_le64(migf->buf[0]->allocated_length);
+ data.stop_copy_size = cpu_to_le64(migf->buf[0]->npages * PAGE_SIZE);
memcpy(to_buff + sizeof(header), &data, sizeof(data));
header_buf->length += sizeof(data);
kunmap_local(to_buff);
@@ -437,15 +407,20 @@ static int mlx5vf_prep_stop_copy(struct mlx5vf_pci_core_device *mvdev,
num_chunks = mvdev->chunk_mode ? MAX_NUM_CHUNKS : 1;
for (i = 0; i < num_chunks; i++) {
- buf = mlx5vf_get_data_buffer(migf, inc_state_size, DMA_FROM_DEVICE);
+ buf = mlx5vf_get_data_buffer(
+ migf, DIV_ROUND_UP(inc_state_size, PAGE_SIZE),
+ DMA_FROM_DEVICE);
if (IS_ERR(buf)) {
ret = PTR_ERR(buf);
goto err;
}
migf->buf[i] = buf;
- buf = mlx5vf_get_data_buffer(migf,
- sizeof(struct mlx5_vf_migration_header), DMA_NONE);
+ buf = mlx5vf_get_data_buffer(
+ migf,
+ DIV_ROUND_UP(sizeof(struct mlx5_vf_migration_header),
+ PAGE_SIZE),
+ DMA_NONE);
if (IS_ERR(buf)) {
ret = PTR_ERR(buf);
goto err;
@@ -553,7 +528,8 @@ static long mlx5vf_precopy_ioctl(struct file *filp, unsigned int cmd,
* We finished transferring the current state and the device has a
* dirty state, save a new state to be ready for.
*/
- buf = mlx5vf_get_data_buffer(migf, inc_length, DMA_FROM_DEVICE);
+ buf = mlx5vf_get_data_buffer(migf, DIV_ROUND_UP(inc_length, PAGE_SIZE),
+ DMA_FROM_DEVICE);
if (IS_ERR(buf)) {
ret = PTR_ERR(buf);
mlx5vf_mark_err(migf);
@@ -675,8 +651,8 @@ mlx5vf_pci_save_device_data(struct mlx5vf_pci_core_device *mvdev, bool track)
if (track) {
/* leave the allocated buffer ready for the stop-copy phase */
- buf = mlx5vf_alloc_data_buffer(migf,
- migf->buf[0]->allocated_length, DMA_FROM_DEVICE);
+ buf = mlx5vf_alloc_data_buffer(migf, migf->buf[0]->npages,
+ DMA_FROM_DEVICE);
if (IS_ERR(buf)) {
ret = PTR_ERR(buf);
goto out_pd;
@@ -917,11 +893,14 @@ static ssize_t mlx5vf_resume_write(struct file *filp, const char __user *buf,
goto out_unlock;
break;
case MLX5_VF_LOAD_STATE_PREP_HEADER_DATA:
- if (vhca_buf_header->allocated_length < migf->record_size) {
+ {
+ u32 npages = DIV_ROUND_UP(migf->record_size, PAGE_SIZE);
+
+ if (vhca_buf_header->npages < npages) {
mlx5vf_free_data_buffer(vhca_buf_header);
- migf->buf_header[0] = mlx5vf_alloc_data_buffer(migf,
- migf->record_size, DMA_NONE);
+ migf->buf_header[0] = mlx5vf_alloc_data_buffer(
+ migf, npages, DMA_NONE);
if (IS_ERR(migf->buf_header[0])) {
ret = PTR_ERR(migf->buf_header[0]);
migf->buf_header[0] = NULL;
@@ -934,6 +913,7 @@ static ssize_t mlx5vf_resume_write(struct file *filp, const char __user *buf,
vhca_buf_header->start_pos = migf->max_pos;
migf->load_state = MLX5_VF_LOAD_STATE_READ_HEADER_DATA;
break;
+ }
case MLX5_VF_LOAD_STATE_READ_HEADER_DATA:
ret = mlx5vf_resume_read_header_data(migf, vhca_buf_header,
&buf, &len, pos, &done);
@@ -944,12 +924,13 @@ static ssize_t mlx5vf_resume_write(struct file *filp, const char __user *buf,
{
u64 size = max(migf->record_size,
migf->stop_copy_prep_size);
+ u32 npages = DIV_ROUND_UP(size, PAGE_SIZE);
- if (vhca_buf->allocated_length < size) {
+ if (vhca_buf->npages < npages) {
mlx5vf_free_data_buffer(vhca_buf);
- migf->buf[0] = mlx5vf_alloc_data_buffer(migf,
- size, DMA_TO_DEVICE);
+ migf->buf[0] = mlx5vf_alloc_data_buffer(
+ migf, npages, DMA_TO_DEVICE);
if (IS_ERR(migf->buf[0])) {
ret = PTR_ERR(migf->buf[0]);
migf->buf[0] = NULL;
@@ -1037,8 +1018,11 @@ mlx5vf_pci_resume_device_data(struct mlx5vf_pci_core_device *mvdev)
}
migf->buf[0] = buf;
- buf = mlx5vf_alloc_data_buffer(migf,
- sizeof(struct mlx5_vf_migration_header), DMA_NONE);
+ buf = mlx5vf_alloc_data_buffer(
+ migf,
+ DIV_ROUND_UP(sizeof(struct mlx5_vf_migration_header),
+ PAGE_SIZE),
+ DMA_NONE);
if (IS_ERR(buf)) {
ret = PTR_ERR(buf);
goto out_buf;
@@ -1148,7 +1132,8 @@ mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev,
MLX5VF_QUERY_INC | MLX5VF_QUERY_CLEANUP);
if (ret)
return ERR_PTR(ret);
- buf = mlx5vf_get_data_buffer(migf, size, DMA_FROM_DEVICE);
+ buf = mlx5vf_get_data_buffer(migf,
+ DIV_ROUND_UP(size, PAGE_SIZE), DMA_FROM_DEVICE);
if (IS_ERR(buf))
return ERR_CAST(buf);
/* pre_copy cleanup */
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 0ac56072af9f..1136d7ac6b59 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -80,7 +80,6 @@ struct vfio_domain {
struct iommu_domain *domain;
struct list_head next;
struct list_head group_list;
- bool fgsp : 1; /* Fine-grained super pages */
bool enforce_cache_coherency : 1;
};
@@ -293,7 +292,7 @@ static int vfio_dma_bitmap_alloc_all(struct vfio_iommu *iommu, size_t pgsize)
struct rb_node *p;
for (p = rb_prev(n); p; p = rb_prev(p)) {
- struct vfio_dma *dma = rb_entry(n,
+ struct vfio_dma *dma = rb_entry(p,
struct vfio_dma, node);
vfio_dma_bitmap_free(dma);
@@ -1095,8 +1094,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
* may require hardware cache flushing, try to find the
* largest contiguous physical memory chunk to unmap.
*/
- for (len = PAGE_SIZE;
- !domain->fgsp && iova + len < end; len += PAGE_SIZE) {
+ for (len = PAGE_SIZE; iova + len < end; len += PAGE_SIZE) {
next = iommu_iova_to_phys(domain->domain, iova + len);
if (next != phys + len)
break;
@@ -1833,49 +1831,6 @@ unwind:
return ret;
}
-/*
- * We change our unmap behavior slightly depending on whether the IOMMU
- * supports fine-grained superpages. IOMMUs like AMD-Vi will use a superpage
- * for practically any contiguous power-of-two mapping we give it. This means
- * we don't need to look for contiguous chunks ourselves to make unmapping
- * more efficient. On IOMMUs with coarse-grained super pages, like Intel VT-d
- * with discrete 2M/1G/512G/1T superpages, identifying contiguous chunks
- * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
- * hugetlbfs is in use.
- */
-static void vfio_test_domain_fgsp(struct vfio_domain *domain, struct list_head *regions)
-{
- int ret, order = get_order(PAGE_SIZE * 2);
- struct vfio_iova *region;
- struct page *pages;
- dma_addr_t start;
-
- pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
- if (!pages)
- return;
-
- list_for_each_entry(region, regions, list) {
- start = ALIGN(region->start, PAGE_SIZE * 2);
- if (start >= region->end || (region->end - start < PAGE_SIZE * 2))
- continue;
-
- ret = iommu_map(domain->domain, start, page_to_phys(pages), PAGE_SIZE * 2,
- IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE,
- GFP_KERNEL_ACCOUNT);
- if (!ret) {
- size_t unmapped = iommu_unmap(domain->domain, start, PAGE_SIZE);
-
- if (unmapped == PAGE_SIZE)
- iommu_unmap(domain->domain, start + PAGE_SIZE, PAGE_SIZE);
- else
- domain->fgsp = true;
- }
- break;
- }
-
- __free_pages(pages, order);
-}
-
static struct vfio_iommu_group *find_iommu_group(struct vfio_domain *domain,
struct iommu_group *iommu_group)
{
@@ -2314,8 +2269,6 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
}
}
- vfio_test_domain_fgsp(domain, &iova_copy);
-
/* replay mappings on new domains */
ret = vfio_iommu_replay(iommu, domain);
if (ret)
diff --git a/drivers/virt/coco/Kconfig b/drivers/virt/coco/Kconfig
index ff869d883d95..819a97e8ba99 100644
--- a/drivers/virt/coco/Kconfig
+++ b/drivers/virt/coco/Kconfig
@@ -3,10 +3,6 @@
# Confidential computing related collateral
#
-config TSM_REPORTS
- select CONFIGFS_FS
- tristate
-
source "drivers/virt/coco/efi_secret/Kconfig"
source "drivers/virt/coco/pkvm-guest/Kconfig"
@@ -16,3 +12,5 @@ source "drivers/virt/coco/sev-guest/Kconfig"
source "drivers/virt/coco/tdx-guest/Kconfig"
source "drivers/virt/coco/arm-cca-guest/Kconfig"
+
+source "drivers/virt/coco/guest/Kconfig"
diff --git a/drivers/virt/coco/Makefile b/drivers/virt/coco/Makefile
index c3d07cfc087e..f918bbb61737 100644
--- a/drivers/virt/coco/Makefile
+++ b/drivers/virt/coco/Makefile
@@ -2,9 +2,9 @@
#
# Confidential computing related collateral
#
-obj-$(CONFIG_TSM_REPORTS) += tsm.o
obj-$(CONFIG_EFI_SECRET) += efi_secret/
obj-$(CONFIG_ARM_PKVM_GUEST) += pkvm-guest/
obj-$(CONFIG_SEV_GUEST) += sev-guest/
obj-$(CONFIG_INTEL_TDX_GUEST) += tdx-guest/
obj-$(CONFIG_ARM_CCA_GUEST) += arm-cca-guest/
+obj-$(CONFIG_TSM_GUEST) += guest/
diff --git a/drivers/virt/coco/arm-cca-guest/arm-cca-guest.c b/drivers/virt/coco/arm-cca-guest/arm-cca-guest.c
index 87f162736b2e..0c9ea24a200c 100644
--- a/drivers/virt/coco/arm-cca-guest/arm-cca-guest.c
+++ b/drivers/virt/coco/arm-cca-guest/arm-cca-guest.c
@@ -96,7 +96,7 @@ static int arm_cca_report_new(struct tsm_report *report, void *data)
struct arm_cca_token_info info;
void *buf;
u8 *token __free(kvfree) = NULL;
- struct tsm_desc *desc = &report->desc;
+ struct tsm_report_desc *desc = &report->desc;
if (desc->inblob_len < 32 || desc->inblob_len > 64)
return -EINVAL;
@@ -181,7 +181,7 @@ exit_free_granule_page:
return ret;
}
-static const struct tsm_ops arm_cca_tsm_ops = {
+static const struct tsm_report_ops arm_cca_tsm_ops = {
.name = KBUILD_MODNAME,
.report_new = arm_cca_report_new,
};
@@ -202,7 +202,7 @@ static int __init arm_cca_guest_init(void)
if (!is_realm_world())
return -ENODEV;
- ret = tsm_register(&arm_cca_tsm_ops, NULL);
+ ret = tsm_report_register(&arm_cca_tsm_ops, NULL);
if (ret < 0)
pr_err("Error %d registering with TSM\n", ret);
@@ -216,7 +216,7 @@ module_init(arm_cca_guest_init);
*/
static void __exit arm_cca_guest_exit(void)
{
- tsm_unregister(&arm_cca_tsm_ops);
+ tsm_report_unregister(&arm_cca_tsm_ops);
}
module_exit(arm_cca_guest_exit);
diff --git a/drivers/virt/coco/guest/Kconfig b/drivers/virt/coco/guest/Kconfig
new file mode 100644
index 000000000000..3d5e1d05bf34
--- /dev/null
+++ b/drivers/virt/coco/guest/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Confidential computing shared guest collateral
+#
+config TSM_GUEST
+ bool
+
+config TSM_REPORTS
+ select TSM_GUEST
+ select CONFIGFS_FS
+ tristate
+
+config TSM_MEASUREMENTS
+ select TSM_GUEST
+ select CRYPTO_HASH_INFO
+ select CRYPTO
+ bool
diff --git a/drivers/virt/coco/guest/Makefile b/drivers/virt/coco/guest/Makefile
new file mode 100644
index 000000000000..9ec4860bd213
--- /dev/null
+++ b/drivers/virt/coco/guest/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_TSM_REPORTS) += tsm_report.o
+tsm_report-y := report.o
+obj-$(CONFIG_TSM_MEASUREMENTS) += tsm-mr.o
diff --git a/drivers/virt/coco/tsm.c b/drivers/virt/coco/guest/report.c
index 9432d4e303f1..d3d18fc22bc2 100644
--- a/drivers/virt/coco/tsm.c
+++ b/drivers/virt/coco/guest/report.c
@@ -13,8 +13,9 @@
#include <linux/configfs.h>
static struct tsm_provider {
- const struct tsm_ops *ops;
+ const struct tsm_report_ops *ops;
void *data;
+ atomic_t count;
} provider;
static DECLARE_RWSEM(tsm_rwsem);
@@ -92,16 +93,19 @@ static ssize_t tsm_report_privlevel_store(struct config_item *cfg,
if (rc)
return rc;
+ guard(rwsem_write)(&tsm_rwsem);
+ if (!provider.ops)
+ return -ENXIO;
+
/*
* The valid privilege levels that a TSM might accept, if it accepts a
* privilege level setting at all, are a max of TSM_PRIVLEVEL_MAX (see
* SEV-SNP GHCB) and a minimum of a TSM selected floor value no less
* than 0.
*/
- if (provider.ops->privlevel_floor > val || val > TSM_PRIVLEVEL_MAX)
+ if (provider.ops->privlevel_floor > val || val > TSM_REPORT_PRIVLEVEL_MAX)
return -EINVAL;
- guard(rwsem_write)(&tsm_rwsem);
rc = try_advance_write_generation(report);
if (rc)
return rc;
@@ -115,6 +119,10 @@ static ssize_t tsm_report_privlevel_floor_show(struct config_item *cfg,
char *buf)
{
guard(rwsem_read)(&tsm_rwsem);
+
+ if (!provider.ops)
+ return -ENXIO;
+
return sysfs_emit(buf, "%u\n", provider.ops->privlevel_floor);
}
CONFIGFS_ATTR_RO(tsm_report_, privlevel_floor);
@@ -202,7 +210,7 @@ static ssize_t tsm_report_inblob_write(struct config_item *cfg,
memcpy(report->desc.inblob, buf, count);
return count;
}
-CONFIGFS_BIN_ATTR_WO(tsm_report_, inblob, NULL, TSM_INBLOB_MAX);
+CONFIGFS_BIN_ATTR_WO(tsm_report_, inblob, NULL, TSM_REPORT_INBLOB_MAX);
static ssize_t tsm_report_generation_show(struct config_item *cfg, char *buf)
{
@@ -217,6 +225,9 @@ CONFIGFS_ATTR_RO(tsm_report_, generation);
static ssize_t tsm_report_provider_show(struct config_item *cfg, char *buf)
{
guard(rwsem_read)(&tsm_rwsem);
+ if (!provider.ops)
+ return -ENXIO;
+
return sysfs_emit(buf, "%s\n", provider.ops->name);
}
CONFIGFS_ATTR_RO(tsm_report_, provider);
@@ -272,7 +283,7 @@ static ssize_t tsm_report_read(struct tsm_report *report, void *buf,
size_t count, enum tsm_data_select select)
{
struct tsm_report_state *state = to_state(report);
- const struct tsm_ops *ops;
+ const struct tsm_report_ops *ops;
ssize_t rc;
/* try to read from the existing report if present and valid... */
@@ -284,7 +295,7 @@ static ssize_t tsm_report_read(struct tsm_report *report, void *buf,
guard(rwsem_write)(&tsm_rwsem);
ops = provider.ops;
if (!ops)
- return -ENOTTY;
+ return -ENXIO;
if (!report->desc.inblob_len)
return -EINVAL;
@@ -314,7 +325,7 @@ static ssize_t tsm_report_outblob_read(struct config_item *cfg, void *buf,
return tsm_report_read(report, buf, count, TSM_REPORT);
}
-CONFIGFS_BIN_ATTR_RO(tsm_report_, outblob, NULL, TSM_OUTBLOB_MAX);
+CONFIGFS_BIN_ATTR_RO(tsm_report_, outblob, NULL, TSM_REPORT_OUTBLOB_MAX);
static ssize_t tsm_report_auxblob_read(struct config_item *cfg, void *buf,
size_t count)
@@ -323,7 +334,7 @@ static ssize_t tsm_report_auxblob_read(struct config_item *cfg, void *buf,
return tsm_report_read(report, buf, count, TSM_CERTS);
}
-CONFIGFS_BIN_ATTR_RO(tsm_report_, auxblob, NULL, TSM_OUTBLOB_MAX);
+CONFIGFS_BIN_ATTR_RO(tsm_report_, auxblob, NULL, TSM_REPORT_OUTBLOB_MAX);
static ssize_t tsm_report_manifestblob_read(struct config_item *cfg, void *buf,
size_t count)
@@ -332,7 +343,7 @@ static ssize_t tsm_report_manifestblob_read(struct config_item *cfg, void *buf,
return tsm_report_read(report, buf, count, TSM_MANIFEST);
}
-CONFIGFS_BIN_ATTR_RO(tsm_report_, manifestblob, NULL, TSM_OUTBLOB_MAX);
+CONFIGFS_BIN_ATTR_RO(tsm_report_, manifestblob, NULL, TSM_REPORT_OUTBLOB_MAX);
static struct configfs_attribute *tsm_report_attrs[] = {
[TSM_REPORT_GENERATION] = &tsm_report_attr_generation,
@@ -421,12 +432,20 @@ static struct config_item *tsm_report_make_item(struct config_group *group,
if (!state)
return ERR_PTR(-ENOMEM);
+ atomic_inc(&provider.count);
config_item_init_type_name(&state->cfg, name, &tsm_report_type);
return &state->cfg;
}
+static void tsm_report_drop_item(struct config_group *group, struct config_item *item)
+{
+ config_item_put(item);
+ atomic_dec(&provider.count);
+}
+
static struct configfs_group_operations tsm_report_group_ops = {
.make_item = tsm_report_make_item,
+ .drop_item = tsm_report_drop_item,
};
static const struct config_item_type tsm_reports_type = {
@@ -448,9 +467,9 @@ static struct configfs_subsystem tsm_configfs = {
.su_mutex = __MUTEX_INITIALIZER(tsm_configfs.su_mutex),
};
-int tsm_register(const struct tsm_ops *ops, void *priv)
+int tsm_report_register(const struct tsm_report_ops *ops, void *priv)
{
- const struct tsm_ops *conflict;
+ const struct tsm_report_ops *conflict;
guard(rwsem_write)(&tsm_rwsem);
conflict = provider.ops;
@@ -459,26 +478,34 @@ int tsm_register(const struct tsm_ops *ops, void *priv)
return -EBUSY;
}
+ if (atomic_read(&provider.count)) {
+ pr_err("configfs/tsm/report not empty\n");
+ return -EBUSY;
+ }
+
provider.ops = ops;
provider.data = priv;
return 0;
}
-EXPORT_SYMBOL_GPL(tsm_register);
+EXPORT_SYMBOL_GPL(tsm_report_register);
-int tsm_unregister(const struct tsm_ops *ops)
+int tsm_report_unregister(const struct tsm_report_ops *ops)
{
guard(rwsem_write)(&tsm_rwsem);
if (ops != provider.ops)
return -EBUSY;
+ if (atomic_read(&provider.count))
+ pr_warn("\"%s\" unregistered with items present in configfs/tsm/report\n",
+ provider.ops->name);
provider.ops = NULL;
provider.data = NULL;
return 0;
}
-EXPORT_SYMBOL_GPL(tsm_unregister);
+EXPORT_SYMBOL_GPL(tsm_report_unregister);
static struct config_group *tsm_report_group;
-static int __init tsm_init(void)
+static int __init tsm_report_init(void)
{
struct config_group *root = &tsm_configfs.su_group;
struct config_group *tsm;
@@ -499,14 +526,14 @@ static int __init tsm_init(void)
return 0;
}
-module_init(tsm_init);
+module_init(tsm_report_init);
-static void __exit tsm_exit(void)
+static void __exit tsm_report_exit(void)
{
configfs_unregister_default_group(tsm_report_group);
configfs_unregister_subsystem(&tsm_configfs);
}
-module_exit(tsm_exit);
+module_exit(tsm_report_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Provide Trusted Security Module attestation reports via configfs");
diff --git a/drivers/virt/coco/guest/tsm-mr.c b/drivers/virt/coco/guest/tsm-mr.c
new file mode 100644
index 000000000000..feb30af90a20
--- /dev/null
+++ b/drivers/virt/coco/guest/tsm-mr.c
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2024-2025 Intel Corporation. All rights reserved. */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/tsm_mr.h>
+
+/*
+ * struct tm_context - contains everything necessary to implement sysfs
+ * attributes for MRs.
+ * @rwsem: protects the MR cache from concurrent access.
+ * @agrp: contains all MR attributes created by tsm_mr_create_attribute_group().
+ * @tm: input to tsm_mr_create_attribute_group() containing MR definitions/ops.
+ * @in_sync: %true if MR cache is up-to-date.
+ * @mrs: array of &struct bin_attribute, one for each MR.
+ *
+ * This internal structure contains everything needed to implement
+ * tm_digest_read() and tm_digest_write().
+ *
+ * Given tm->refresh() is potentially expensive, tm_digest_read() caches MR
+ * values and calls tm->refresh() only when necessary. Only live MRs (i.e., with
+ * %TSM_MR_F_LIVE set) can trigger tm->refresh(), while others are assumed to
+ * retain their values from the last tm->write(). @in_sync is cleared whenever
+ * tm->write() succeeds and set again after a successful tm->refresh(). That is,
+ * tm->refresh() will be called only when a live MR is being read and the cache
+ * is stale (@in_sync is %false).
+ *
+ * tm_digest_write() sets @in_sync to %false and calls tm->write(), whose
+ * semantics are arch- and MR-specific. Most (if not all) writable MRs support
+ * extension semantics (i.e., tm->write() extends the input buffer into the MR).
+ */
+struct tm_context {
+ struct rw_semaphore rwsem;
+ struct attribute_group agrp;
+ const struct tsm_measurements *tm;
+ bool in_sync;
+ struct bin_attribute mrs[];
+};
+
+static ssize_t tm_digest_read(struct file *filp, struct kobject *kobj,
+ const struct bin_attribute *attr, char *buffer,
+ loff_t off, size_t count)
+{
+ struct tm_context *ctx;
+ const struct tsm_measurement_register *mr;
+ int rc;
+
+ ctx = attr->private;
+ rc = down_read_interruptible(&ctx->rwsem);
+ if (rc)
+ return rc;
+
+ mr = &ctx->tm->mrs[attr - ctx->mrs];
+
+	/*
+	 * @ctx->in_sync indicates whether the MR cache is up-to-date. It is a
+	 * global flag instead of a per-MR one for simplicity, as most (if not
+	 * all) archs allow reading all MRs in one shot.
+	 *
+	 * tm->refresh() is necessary only for LIVE MRs, while others retain
+	 * their values from their respective last tm->write().
+	 */
+ if ((mr->mr_flags & TSM_MR_F_LIVE) && !ctx->in_sync) {
+ up_read(&ctx->rwsem);
+
+ rc = down_write_killable(&ctx->rwsem);
+ if (rc)
+ return rc;
+
+ if (!ctx->in_sync) {
+ rc = ctx->tm->refresh(ctx->tm);
+ ctx->in_sync = !rc;
+ trace_tsm_mr_refresh(mr, rc);
+ }
+
+ downgrade_write(&ctx->rwsem);
+ }
+
+ memcpy(buffer, mr->mr_value + off, count);
+ trace_tsm_mr_read(mr);
+
+ up_read(&ctx->rwsem);
+ return rc ?: count;
+}
+
+static ssize_t tm_digest_write(struct file *filp, struct kobject *kobj,
+ const struct bin_attribute *attr, char *buffer,
+ loff_t off, size_t count)
+{
+ struct tm_context *ctx;
+ const struct tsm_measurement_register *mr;
+ ssize_t rc;
+
+ /* partial writes are not supported */
+ if (off != 0 || count != attr->size)
+ return -EINVAL;
+
+ ctx = attr->private;
+ mr = &ctx->tm->mrs[attr - ctx->mrs];
+
+ rc = down_write_killable(&ctx->rwsem);
+ if (rc)
+ return rc;
+
+ rc = ctx->tm->write(ctx->tm, mr, buffer);
+
+ /* mark MR cache stale */
+ if (!rc) {
+ ctx->in_sync = false;
+ trace_tsm_mr_write(mr, buffer);
+ }
+
+ up_write(&ctx->rwsem);
+ return rc ?: count;
+}
+
+/**
+ * tsm_mr_create_attribute_group() - creates an attribute group for measurement
+ * registers (MRs)
+ * @tm: pointer to &struct tsm_measurements containing the MR definitions.
+ *
+ * This function creates attributes corresponding to the MR definitions
+ * provided by @tm->mrs.
+ *
+ * The created attributes will reference @tm and its members. The caller must
+ * not free @tm until after tsm_mr_free_attribute_group() is called.
+ *
+ * Context: Process context. May sleep due to memory allocation.
+ *
+ * Return:
+ * * On success, a pointer to the attribute group is returned; otherwise
+ * * %-EINVAL - Invalid MR definitions.
+ * * %-ENOMEM - Out of memory.
+ */
+const struct attribute_group *
+tsm_mr_create_attribute_group(const struct tsm_measurements *tm)
+{
+ size_t nlen;
+
+ if (!tm || !tm->mrs)
+ return ERR_PTR(-EINVAL);
+
+ /* aggregated length of all MR names */
+ nlen = 0;
+ for (size_t i = 0; i < tm->nr_mrs; ++i) {
+ if ((tm->mrs[i].mr_flags & TSM_MR_F_LIVE) && !tm->refresh)
+ return ERR_PTR(-EINVAL);
+
+ if ((tm->mrs[i].mr_flags & TSM_MR_F_WRITABLE) && !tm->write)
+ return ERR_PTR(-EINVAL);
+
+ if (!tm->mrs[i].mr_name)
+ return ERR_PTR(-EINVAL);
+
+ if (tm->mrs[i].mr_flags & TSM_MR_F_NOHASH)
+ continue;
+
+ if (tm->mrs[i].mr_hash >= HASH_ALGO__LAST)
+ return ERR_PTR(-EINVAL);
+
+		/* MR sysfs attribute names have the form MRNAME:HASH */
+ nlen += strlen(tm->mrs[i].mr_name) + 1 +
+ strlen(hash_algo_name[tm->mrs[i].mr_hash]) + 1;
+ }
+
+ /*
+ * @attrs and the MR name strings are combined into a single allocation
+ * so that we don't have to free MR names one-by-one in
+ * tsm_mr_free_attribute_group()
+ */
+ const struct bin_attribute **attrs __free(kfree) =
+ kzalloc(sizeof(*attrs) * (tm->nr_mrs + 1) + nlen, GFP_KERNEL);
+ struct tm_context *ctx __free(kfree) =
+ kzalloc(struct_size(ctx, mrs, tm->nr_mrs), GFP_KERNEL);
+ char *name, *end;
+
+ if (!ctx || !attrs)
+ return ERR_PTR(-ENOMEM);
+
+ /* @attrs is followed immediately by MR name strings */
+ name = (char *)&attrs[tm->nr_mrs + 1];
+ end = name + nlen;
+
+ for (size_t i = 0; i < tm->nr_mrs; ++i) {
+ struct bin_attribute *bap = &ctx->mrs[i];
+
+ sysfs_bin_attr_init(bap);
+
+ if (tm->mrs[i].mr_flags & TSM_MR_F_NOHASH)
+ bap->attr.name = tm->mrs[i].mr_name;
+ else if (name < end) {
+ bap->attr.name = name;
+ name += snprintf(name, end - name, "%s:%s",
+ tm->mrs[i].mr_name,
+ hash_algo_name[tm->mrs[i].mr_hash]);
+ ++name;
+ } else
+ return ERR_PTR(-EINVAL);
+
+ /* check for duplicated MR definitions */
+ for (size_t j = 0; j < i; ++j)
+ if (!strcmp(bap->attr.name, attrs[j]->attr.name))
+ return ERR_PTR(-EINVAL);
+
+ if (tm->mrs[i].mr_flags & TSM_MR_F_READABLE) {
+ bap->attr.mode |= 0444;
+ bap->read_new = tm_digest_read;
+ }
+
+ if (tm->mrs[i].mr_flags & TSM_MR_F_WRITABLE) {
+ bap->attr.mode |= 0200;
+ bap->write_new = tm_digest_write;
+ }
+
+ bap->size = tm->mrs[i].mr_size;
+ bap->private = ctx;
+
+ attrs[i] = bap;
+ }
+
+ if (name != end)
+ return ERR_PTR(-EINVAL);
+
+ init_rwsem(&ctx->rwsem);
+ ctx->agrp.name = "measurements";
+ ctx->agrp.bin_attrs_new = no_free_ptr(attrs);
+ ctx->tm = tm;
+ return &no_free_ptr(ctx)->agrp;
+}
+EXPORT_SYMBOL_GPL(tsm_mr_create_attribute_group);
+
+/**
+ * tsm_mr_free_attribute_group() - frees the attribute group returned by
+ * tsm_mr_create_attribute_group()
+ * @attr_grp: attribute group returned by tsm_mr_create_attribute_group()
+ *
+ * Context: Process context.
+ */
+void tsm_mr_free_attribute_group(const struct attribute_group *attr_grp)
+{
+ if (!IS_ERR_OR_NULL(attr_grp)) {
+ kfree(attr_grp->bin_attrs_new);
+ kfree(container_of(attr_grp, struct tm_context, agrp));
+ }
+}
+EXPORT_SYMBOL_GPL(tsm_mr_free_attribute_group);
diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
index cf3fb61f4d5b..7a4e2188f109 100644
--- a/drivers/virt/coco/sev-guest/sev-guest.c
+++ b/drivers/virt/coco/sev-guest/sev-guest.c
@@ -346,7 +346,7 @@ struct snp_msg_cert_entry {
static int sev_svsm_report_new(struct tsm_report *report, void *data)
{
unsigned int rep_len, man_len, certs_len;
- struct tsm_desc *desc = &report->desc;
+ struct tsm_report_desc *desc = &report->desc;
struct svsm_attest_call ac = {};
unsigned int retry_count;
void *rep, *man, *certs;
@@ -481,7 +481,7 @@ retry:
static int sev_report_new(struct tsm_report *report, void *data)
{
struct snp_msg_cert_entry *cert_table;
- struct tsm_desc *desc = &report->desc;
+ struct tsm_report_desc *desc = &report->desc;
struct snp_guest_dev *snp_dev = data;
struct snp_msg_report_resp_hdr hdr;
const u32 report_size = SZ_4K;
@@ -610,7 +610,7 @@ static bool sev_report_bin_attr_visible(int n)
return false;
}
-static struct tsm_ops sev_tsm_ops = {
+static struct tsm_report_ops sev_tsm_report_ops = {
.name = KBUILD_MODNAME,
.report_new = sev_report_new,
.report_attr_visible = sev_report_attr_visible,
@@ -619,7 +619,7 @@ static struct tsm_ops sev_tsm_ops = {
static void unregister_sev_tsm(void *data)
{
- tsm_unregister(&sev_tsm_ops);
+ tsm_report_unregister(&sev_tsm_report_ops);
}
static int __init sev_guest_probe(struct platform_device *pdev)
@@ -656,9 +656,9 @@ static int __init sev_guest_probe(struct platform_device *pdev)
misc->fops = &snp_guest_fops;
/* Set the privlevel_floor attribute based on the vmpck_id */
- sev_tsm_ops.privlevel_floor = mdesc->vmpck_id;
+ sev_tsm_report_ops.privlevel_floor = mdesc->vmpck_id;
- ret = tsm_register(&sev_tsm_ops, snp_dev);
+ ret = tsm_report_register(&sev_tsm_report_ops, snp_dev);
if (ret)
goto e_msg_init;
diff --git a/drivers/virt/coco/tdx-guest/Kconfig b/drivers/virt/coco/tdx-guest/Kconfig
index 22dd59e19431..dbbdc14383b1 100644
--- a/drivers/virt/coco/tdx-guest/Kconfig
+++ b/drivers/virt/coco/tdx-guest/Kconfig
@@ -2,6 +2,7 @@ config TDX_GUEST_DRIVER
tristate "TDX Guest driver"
depends on INTEL_TDX_GUEST
select TSM_REPORTS
+ select TSM_MEASUREMENTS
help
The driver provides userspace interface to communicate with
the TDX module to request the TDX guest details like attestation
diff --git a/drivers/virt/coco/tdx-guest/tdx-guest.c b/drivers/virt/coco/tdx-guest/tdx-guest.c
index 224e7dde9cde..4e239ec960c9 100644
--- a/drivers/virt/coco/tdx-guest/tdx-guest.c
+++ b/drivers/virt/coco/tdx-guest/tdx-guest.c
@@ -5,6 +5,8 @@
* Copyright (C) 2022 Intel Corporation
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
@@ -15,14 +17,146 @@
#include <linux/set_memory.h>
#include <linux/io.h>
#include <linux/delay.h>
+#include <linux/sockptr.h>
#include <linux/tsm.h>
-#include <linux/sizes.h>
+#include <linux/tsm-mr.h>
#include <uapi/linux/tdx-guest.h>
#include <asm/cpu_device_id.h>
#include <asm/tdx.h>
+/* TDREPORT buffer */
+static u8 *tdx_report_buf;
+
+/* Lock to serialize TDG.MR.REPORT and TDG.MR.RTMR.EXTEND TDCALLs */
+static DEFINE_MUTEX(mr_lock);
+
+/* TDREPORT fields */
+enum {
+ TDREPORT_reportdata = 128,
+ TDREPORT_tee_tcb_info = 256,
+ TDREPORT_tdinfo = TDREPORT_tee_tcb_info + 256,
+ TDREPORT_attributes = TDREPORT_tdinfo,
+ TDREPORT_xfam = TDREPORT_attributes + sizeof(u64),
+ TDREPORT_mrtd = TDREPORT_xfam + sizeof(u64),
+ TDREPORT_mrconfigid = TDREPORT_mrtd + SHA384_DIGEST_SIZE,
+ TDREPORT_mrowner = TDREPORT_mrconfigid + SHA384_DIGEST_SIZE,
+ TDREPORT_mrownerconfig = TDREPORT_mrowner + SHA384_DIGEST_SIZE,
+ TDREPORT_rtmr0 = TDREPORT_mrownerconfig + SHA384_DIGEST_SIZE,
+ TDREPORT_rtmr1 = TDREPORT_rtmr0 + SHA384_DIGEST_SIZE,
+ TDREPORT_rtmr2 = TDREPORT_rtmr1 + SHA384_DIGEST_SIZE,
+ TDREPORT_rtmr3 = TDREPORT_rtmr2 + SHA384_DIGEST_SIZE,
+ TDREPORT_servtd_hash = TDREPORT_rtmr3 + SHA384_DIGEST_SIZE,
+};
+
+static int tdx_do_report(sockptr_t data, sockptr_t tdreport)
+{
+ scoped_cond_guard(mutex_intr, return -EINTR, &mr_lock) {
+ u8 *reportdata = tdx_report_buf + TDREPORT_reportdata;
+ int ret;
+
+ if (!sockptr_is_null(data) &&
+ copy_from_sockptr(reportdata, data, TDX_REPORTDATA_LEN))
+ return -EFAULT;
+
+ ret = tdx_mcall_get_report0(reportdata, tdx_report_buf);
+ if (WARN_ONCE(ret, "tdx_mcall_get_report0() failed: %d", ret))
+ return ret;
+
+ if (!sockptr_is_null(tdreport) &&
+ copy_to_sockptr(tdreport, tdx_report_buf, TDX_REPORT_LEN))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int tdx_do_extend(u8 mr_ind, const u8 *data)
+{
+ scoped_cond_guard(mutex_intr, return -EINTR, &mr_lock) {
+		/*
+		 * TDX requires @extend_buf to be 64-byte aligned. It's safe to
+		 * reuse the REPORTDATA buffer for that purpose because the
+		 * report and extend TDCALLs are serialized by @mr_lock.
+		 */
+ u8 *extend_buf = tdx_report_buf + TDREPORT_reportdata;
+ int ret;
+
+ memcpy(extend_buf, data, SHA384_DIGEST_SIZE);
+
+ ret = tdx_mcall_extend_rtmr(mr_ind, extend_buf);
+ if (WARN_ONCE(ret, "tdx_mcall_extend_rtmr(%u) failed: %d", mr_ind, ret))
+ return ret;
+ }
+ return 0;
+}
+
+#define TDX_MR_(r) .mr_value = (void *)TDREPORT_##r, TSM_MR_(r, SHA384)
+static struct tsm_measurement_register tdx_mrs[] = {
+ { TDX_MR_(rtmr0) | TSM_MR_F_RTMR },
+ { TDX_MR_(rtmr1) | TSM_MR_F_RTMR },
+ { TDX_MR_(rtmr2) | TSM_MR_F_RTMR },
+ { TDX_MR_(rtmr3) | TSM_MR_F_RTMR },
+ { TDX_MR_(mrtd) },
+ { TDX_MR_(mrconfigid) | TSM_MR_F_NOHASH },
+ { TDX_MR_(mrowner) | TSM_MR_F_NOHASH },
+ { TDX_MR_(mrownerconfig) | TSM_MR_F_NOHASH },
+};
+#undef TDX_MR_
+
+static int tdx_mr_refresh(const struct tsm_measurements *tm)
+{
+ return tdx_do_report(KERNEL_SOCKPTR(NULL), KERNEL_SOCKPTR(NULL));
+}
+
+static int tdx_mr_extend(const struct tsm_measurements *tm,
+ const struct tsm_measurement_register *mr,
+ const u8 *data)
+{
+ return tdx_do_extend(mr - tm->mrs, data);
+}
+
+static struct tsm_measurements tdx_measurements = {
+ .mrs = tdx_mrs,
+ .nr_mrs = ARRAY_SIZE(tdx_mrs),
+ .refresh = tdx_mr_refresh,
+ .write = tdx_mr_extend,
+};
+
+static const struct attribute_group *tdx_mr_init(void)
+{
+ const struct attribute_group *g;
+ int rc;
+
+ u8 *buf __free(kfree) = kzalloc(TDX_REPORT_LEN, GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ tdx_report_buf = buf;
+ rc = tdx_mr_refresh(&tdx_measurements);
+ if (rc)
+ return ERR_PTR(rc);
+
+	/*
+	 * @mr_value was initialized with the offset into the TDREPORT buffer
+	 * only; add the buffer's base address here.
+	 */
+ for (size_t i = 0; i < ARRAY_SIZE(tdx_mrs); ++i)
+ *(long *)&tdx_mrs[i].mr_value += (long)buf;
+
+ g = tsm_mr_create_attribute_group(&tdx_measurements);
+ if (!IS_ERR(g))
+ tdx_report_buf = no_free_ptr(buf);
+
+ return g;
+}
+
+static void tdx_mr_deinit(const struct attribute_group *mr_grp)
+{
+ tsm_mr_free_attribute_group(mr_grp);
+ kfree(tdx_report_buf);
+}
+
/*
* Intel's SGX QE implementation generally uses Quote size less
* than 8K (2K Quote data + ~5K of certificate blob).
@@ -68,37 +202,8 @@ static u32 getquote_timeout = 30;
static long tdx_get_report0(struct tdx_report_req __user *req)
{
- u8 *reportdata, *tdreport;
- long ret;
-
- reportdata = kmalloc(TDX_REPORTDATA_LEN, GFP_KERNEL);
- if (!reportdata)
- return -ENOMEM;
-
- tdreport = kzalloc(TDX_REPORT_LEN, GFP_KERNEL);
- if (!tdreport) {
- ret = -ENOMEM;
- goto out;
- }
-
- if (copy_from_user(reportdata, req->reportdata, TDX_REPORTDATA_LEN)) {
- ret = -EFAULT;
- goto out;
- }
-
- /* Generate TDREPORT0 using "TDG.MR.REPORT" TDCALL */
- ret = tdx_mcall_get_report0(reportdata, tdreport);
- if (ret)
- goto out;
-
- if (copy_to_user(req->tdreport, tdreport, TDX_REPORT_LEN))
- ret = -EFAULT;
-
-out:
- kfree(reportdata);
- kfree(tdreport);
-
- return ret;
+ return tdx_do_report(USER_SOCKPTR(req->reportdata),
+ USER_SOCKPTR(req->tdreport));
}
static void free_quote_buf(void *buf)
@@ -157,53 +262,24 @@ static int wait_for_quote_completion(struct tdx_quote_buf *quote_buf, u32 timeou
return (i == timeout) ? -ETIMEDOUT : 0;
}
-static int tdx_report_new(struct tsm_report *report, void *data)
+static int tdx_report_new_locked(struct tsm_report *report, void *data)
{
- u8 *buf, *reportdata = NULL, *tdreport = NULL;
+ u8 *buf;
struct tdx_quote_buf *quote_buf = quote_data;
- struct tsm_desc *desc = &report->desc;
+ struct tsm_report_desc *desc = &report->desc;
int ret;
u64 err;
- /* TODO: switch to guard(mutex_intr) */
- if (mutex_lock_interruptible(&quote_lock))
- return -EINTR;
-
/*
* If the previous request is timedout or interrupted, and the
* Quote buf status is still in GET_QUOTE_IN_FLIGHT (owned by
* VMM), don't permit any new request.
*/
- if (quote_buf->status == GET_QUOTE_IN_FLIGHT) {
- ret = -EBUSY;
- goto done;
- }
-
- if (desc->inblob_len != TDX_REPORTDATA_LEN) {
- ret = -EINVAL;
- goto done;
- }
-
- reportdata = kmalloc(TDX_REPORTDATA_LEN, GFP_KERNEL);
- if (!reportdata) {
- ret = -ENOMEM;
- goto done;
- }
+ if (quote_buf->status == GET_QUOTE_IN_FLIGHT)
+ return -EBUSY;
- tdreport = kzalloc(TDX_REPORT_LEN, GFP_KERNEL);
- if (!tdreport) {
- ret = -ENOMEM;
- goto done;
- }
-
- memcpy(reportdata, desc->inblob, desc->inblob_len);
-
- /* Generate TDREPORT0 using "TDG.MR.REPORT" TDCALL */
- ret = tdx_mcall_get_report0(reportdata, tdreport);
- if (ret) {
- pr_err("GetReport call failed\n");
- goto done;
- }
+ if (desc->inblob_len != TDX_REPORTDATA_LEN)
+ return -EINVAL;
memset(quote_data, 0, GET_QUOTE_BUF_SIZE);
@@ -211,26 +287,26 @@ static int tdx_report_new(struct tsm_report *report, void *data)
quote_buf->version = GET_QUOTE_CMD_VER;
quote_buf->in_len = TDX_REPORT_LEN;
- memcpy(quote_buf->data, tdreport, TDX_REPORT_LEN);
+ ret = tdx_do_report(KERNEL_SOCKPTR(desc->inblob),
+ KERNEL_SOCKPTR(quote_buf->data));
+ if (ret)
+ return ret;
err = tdx_hcall_get_quote(quote_data, GET_QUOTE_BUF_SIZE);
if (err) {
pr_err("GetQuote hypercall failed, status:%llx\n", err);
- ret = -EIO;
- goto done;
+ return -EIO;
}
ret = wait_for_quote_completion(quote_buf, getquote_timeout);
if (ret) {
pr_err("GetQuote request timedout\n");
- goto done;
+ return ret;
}
buf = kvmemdup(quote_buf->data, quote_buf->out_len, GFP_KERNEL);
- if (!buf) {
- ret = -ENOMEM;
- goto done;
- }
+ if (!buf)
+ return -ENOMEM;
report->outblob = buf;
report->outblob_len = quote_buf->out_len;
@@ -239,14 +315,16 @@ static int tdx_report_new(struct tsm_report *report, void *data)
* TODO: parse the PEM-formatted cert chain out of the quote buffer when
* provided
*/
-done:
- mutex_unlock(&quote_lock);
- kfree(reportdata);
- kfree(tdreport);
return ret;
}
+static int tdx_report_new(struct tsm_report *report, void *data)
+{
+ scoped_cond_guard(mutex_intr, return -EINTR, &quote_lock)
+ return tdx_report_new_locked(report, data);
+}
+
static bool tdx_report_attr_visible(int n)
{
switch (n) {
@@ -285,10 +363,16 @@ static const struct file_operations tdx_guest_fops = {
.unlocked_ioctl = tdx_guest_ioctl,
};
+static const struct attribute_group *tdx_attr_groups[] = {
+ NULL, /* measurements */
+ NULL
+};
+
static struct miscdevice tdx_misc_dev = {
.name = KBUILD_MODNAME,
.minor = MISC_DYNAMIC_MINOR,
.fops = &tdx_guest_fops,
+ .groups = tdx_attr_groups,
};
static const struct x86_cpu_id tdx_guest_ids[] = {
@@ -297,7 +381,7 @@ static const struct x86_cpu_id tdx_guest_ids[] = {
};
MODULE_DEVICE_TABLE(x86cpu, tdx_guest_ids);
-static const struct tsm_ops tdx_tsm_ops = {
+static const struct tsm_report_ops tdx_tsm_ops = {
.name = KBUILD_MODNAME,
.report_new = tdx_report_new,
.report_attr_visible = tdx_report_attr_visible,
@@ -311,9 +395,13 @@ static int __init tdx_guest_init(void)
if (!x86_match_cpu(tdx_guest_ids))
return -ENODEV;
+ tdx_attr_groups[0] = tdx_mr_init();
+ if (IS_ERR(tdx_attr_groups[0]))
+ return PTR_ERR(tdx_attr_groups[0]);
+
ret = misc_register(&tdx_misc_dev);
if (ret)
- return ret;
+ goto deinit_mr;
quote_data = alloc_quote_buf();
if (!quote_data) {
@@ -322,7 +410,7 @@ static int __init tdx_guest_init(void)
goto free_misc;
}
- ret = tsm_register(&tdx_tsm_ops, NULL);
+ ret = tsm_report_register(&tdx_tsm_ops, NULL);
if (ret)
goto free_quote;
@@ -332,6 +420,8 @@ free_quote:
free_quote_buf(quote_data);
free_misc:
misc_deregister(&tdx_misc_dev);
+deinit_mr:
+ tdx_mr_deinit(tdx_attr_groups[0]);
return ret;
}
@@ -339,9 +429,10 @@ module_init(tdx_guest_init);
static void __exit tdx_guest_exit(void)
{
- tsm_unregister(&tdx_tsm_ops);
+ tsm_report_unregister(&tdx_tsm_ops);
free_quote_buf(quote_data);
misc_deregister(&tdx_misc_dev);
+ tdx_mr_deinit(tdx_attr_groups[0]);
}
module_exit(tdx_guest_exit);