Diffstat (limited to 'drivers')
-rw-r--r--  drivers/accel/amdxdna/aie2_pci.c | 13
-rw-r--r--  drivers/acpi/acpi_mrrm.c | 4
-rw-r--r--  drivers/acpi/acpica/utnonansi.c | 2
-rw-r--r--  drivers/acpi/apei/einj-core.c | 2
-rw-r--r--  drivers/acpi/irq.c | 16
-rw-r--r--  drivers/acpi/numa/srat.c | 15
-rw-r--r--  drivers/ata/libata-core.c | 6
-rw-r--r--  drivers/ata/libata-eh.c | 39
-rw-r--r--  drivers/ata/libata-sata.c | 12
-rw-r--r--  drivers/ata/libata-scsi.c | 31
-rw-r--r--  drivers/ata/libata.h | 3
-rw-r--r--  drivers/ata/sata_sx4.c | 30
-rw-r--r--  drivers/base/memory.c | 51
-rw-r--r--  drivers/base/node.c | 9
-rw-r--r--  drivers/base/property.c | 12
-rw-r--r--  drivers/block/zram/backend_deflate.c | 12
-rw-r--r--  drivers/block/zram/backend_lz4.c | 2
-rw-r--r--  drivers/block/zram/backend_lz4hc.c | 2
-rw-r--r--  drivers/block/zram/backend_zstd.c | 2
-rw-r--r--  drivers/block/zram/zcomp.h | 9
-rw-r--r--  drivers/block/zram/zram_drv.c | 352
-rw-r--r--  drivers/bus/brcmstb_gisb.c | 10
-rw-r--r--  drivers/bus/fsl-mc/dprc-driver.c | 2
-rw-r--r--  drivers/bus/fsl-mc/dprc.c | 4
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-allocator.c | 21
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-bus.c | 12
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-private.h | 2
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-uapi.c | 11
-rw-r--r--  drivers/bus/fsl-mc/mc-io.c | 19
-rw-r--r--  drivers/bus/fsl-mc/mc-sys.c | 2
-rw-r--r--  drivers/bus/ti-sysc.c | 68
-rw-r--r--  drivers/cache/sifive_ccache.c | 2
-rw-r--r--  drivers/clocksource/timer-stm32-lp.c | 61
-rw-r--r--  drivers/cpufreq/Kconfig | 12
-rw-r--r--  drivers/cpufreq/Makefile | 1
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/powernow-k8.c | 2
-rw-r--r--  drivers/cpufreq/rcpufreq_dt.rs | 226
-rw-r--r--  drivers/cpufreq/scmi-cpufreq.c | 36
-rw-r--r--  drivers/cpuidle/cpuidle-psci.c | 9
-rw-r--r--  drivers/cxl/Kconfig | 71
-rw-r--r--  drivers/cxl/acpi.c | 24
-rw-r--r--  drivers/cxl/core/Makefile | 1
-rw-r--r--  drivers/cxl/core/cdat.c | 2
-rw-r--r--  drivers/cxl/core/core.h | 4
-rw-r--r--  drivers/cxl/core/edac.c | 2102
-rw-r--r--  drivers/cxl/core/features.c | 43
-rw-r--r--  drivers/cxl/core/hdm.c | 11
-rw-r--r--  drivers/cxl/core/mbox.c | 11
-rw-r--r--  drivers/cxl/core/memdev.c | 5
-rw-r--r--  drivers/cxl/core/pci.c | 48
-rw-r--r--  drivers/cxl/core/port.c | 23
-rw-r--r--  drivers/cxl/core/region.c | 189
-rw-r--r--  drivers/cxl/cxl.h | 23
-rw-r--r--  drivers/cxl/cxlmem.h | 30
-rw-r--r--  drivers/cxl/mem.c | 4
-rw-r--r--  drivers/cxl/port.c | 15
-rw-r--r--  drivers/dax/kmem.c | 10
-rw-r--r--  drivers/dma/idxd/init.c | 41
-rw-r--r--  drivers/edac/altera_edac.c | 6
-rwxr-xr-x  drivers/edac/mem_repair.c | 9
-rw-r--r--  drivers/firmware/Kconfig | 17
-rw-r--r--  drivers/firmware/arm_scmi/Kconfig | 13
-rw-r--r--  drivers/firmware/arm_scmi/Makefile | 1
-rw-r--r--  drivers/firmware/arm_scmi/bus.c | 79
-rw-r--r--  drivers/firmware/arm_scmi/clock.c | 33
-rw-r--r--  drivers/firmware/arm_scmi/common.h | 1
-rw-r--r--  drivers/firmware/arm_scmi/driver.c | 119
-rw-r--r--  drivers/firmware/arm_scmi/protocols.h | 2
-rw-r--r--  drivers/firmware/arm_scmi/quirks.c | 322
-rw-r--r--  drivers/firmware/arm_scmi/quirks.h | 52
-rw-r--r--  drivers/firmware/arm_scmi/raw_mode.c | 72
-rw-r--r--  drivers/firmware/arm_scmi/vendors/imx/Kconfig | 24
-rw-r--r--  drivers/firmware/arm_scmi/vendors/imx/Makefile | 2
-rw-r--r--  drivers/firmware/arm_scmi/vendors/imx/imx-sm-cpu.c | 276
-rw-r--r--  drivers/firmware/arm_scmi/vendors/imx/imx-sm-lmm.c | 263
-rw-r--r--  drivers/firmware/arm_scmi/vendors/imx/imx95.rst | 828
-rw-r--r--  drivers/firmware/efi/Kconfig | 24
-rw-r--r--  drivers/firmware/efi/libstub/Makefile.zboot | 4
-rw-r--r--  drivers/firmware/efi/libstub/efi-stub-helper.c | 1
-rw-r--r--  drivers/firmware/efi/libstub/zboot-header.S | 32
-rw-r--r--  drivers/firmware/efi/libstub/zboot.lds | 11
-rw-r--r--  drivers/firmware/efi/memmap.c | 3
-rw-r--r--  drivers/firmware/efi/test/efi_test.c | 4
-rw-r--r--  drivers/firmware/imx/Kconfig | 22
-rw-r--r--  drivers/firmware/imx/Makefile | 2
-rw-r--r--  drivers/firmware/imx/sm-cpu.c | 85
-rw-r--r--  drivers/firmware/imx/sm-lmm.c | 91
-rw-r--r--  drivers/firmware/qcom/qcom_scm.c | 3
-rw-r--r--  drivers/firmware/qcom/qcom_scm.h | 3
-rw-r--r--  drivers/firmware/qcom/qcom_tzmem.c | 1
-rw-r--r--  drivers/firmware/samsung/exynos-acpm-pmic.c | 16
-rw-r--r--  drivers/firmware/samsung/exynos-acpm.c | 58
-rw-r--r--  drivers/firmware/smccc/kvm_guest.c | 10
-rw-r--r--  drivers/firmware/smccc/smccc.c | 17
-rw-r--r--  drivers/firmware/ti_sci.c | 14
-rw-r--r--  drivers/firmware/turris-mox-rwtm.c | 260
-rw-r--r--  drivers/gpu/drm/i915/i915_mm.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_reg_defs.h | 108
-rw-r--r--  drivers/gpu/drm/panel/panel-samsung-sofef00.c | 4
-rw-r--r--  drivers/gpu/drm/panel/panel-tpo-td028ttec1.c | 6
-rw-r--r--  drivers/hid/Kconfig | 2
-rw-r--r--  drivers/hid/hid-appletb-kbd.c | 9
-rw-r--r--  drivers/hid/hid-core.c | 9
-rw-r--r--  drivers/hid/hid-corsair-void.c | 4
-rw-r--r--  drivers/hid/hid-cp2112.c | 66
-rw-r--r--  drivers/hid/hid-hyperv.c | 4
-rw-r--r--  drivers/hid/hid-ids.h | 1
-rw-r--r--  drivers/hid/hid-kysona.c | 46
-rw-r--r--  drivers/hid/hid-lg4ff.c | 6
-rw-r--r--  drivers/hid/hid-magicmouse.c | 74
-rw-r--r--  drivers/hid/hid-mcp2200.c | 23
-rw-r--r--  drivers/hid/hid-mcp2221.c | 10
-rw-r--r--  drivers/hid/hid-multitouch.c | 12
-rw-r--r--  drivers/hid/hid-quirks.c | 5
-rw-r--r--  drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c | 7
-rw-r--r--  drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c | 4
-rw-r--r--  drivers/hid/usbhid/hid-core.c | 25
-rw-r--r--  drivers/hv/Kconfig | 7
-rw-r--r--  drivers/hv/connection.c | 23
-rw-r--r--  drivers/hv/hv_common.c | 76
-rw-r--r--  drivers/hv/vmbus_drv.c | 95
-rw-r--r--  drivers/hwmon/Kconfig | 29
-rw-r--r--  drivers/hwmon/Makefile | 3
-rw-r--r--  drivers/hwmon/aht10.c | 16
-rw-r--r--  drivers/hwmon/amc6821.c | 50
-rw-r--r--  drivers/hwmon/asus-ec-sensors.c | 53
-rw-r--r--  drivers/hwmon/dell-smm-hwmon.c | 7
-rw-r--r--  drivers/hwmon/gpio-fan.c | 103
-rw-r--r--  drivers/hwmon/ina238.c | 214
-rw-r--r--  drivers/hwmon/ina2xx.c | 8
-rw-r--r--  drivers/hwmon/isl28022.c | 8
-rw-r--r--  drivers/hwmon/k10temp.c | 7
-rw-r--r--  drivers/hwmon/kbatt.c | 147
-rw-r--r--  drivers/hwmon/kfan.c | 246
-rw-r--r--  drivers/hwmon/lm75.c | 2
-rw-r--r--  drivers/hwmon/lm90.c | 2
-rw-r--r--  drivers/hwmon/ltc2992.c | 30
-rw-r--r--  drivers/hwmon/max6639.c | 16
-rw-r--r--  drivers/hwmon/max77705-hwmon.c | 221
-rw-r--r--  drivers/hwmon/nct7363.c | 2
-rw-r--r--  drivers/hwmon/pmbus/Kconfig | 18
-rw-r--r--  drivers/hwmon/pmbus/Makefile | 1
-rw-r--r--  drivers/hwmon/pmbus/lm25066.c | 2
-rw-r--r--  drivers/hwmon/pmbus/lt3074.c | 122
-rw-r--r--  drivers/hwmon/pmbus/max34440.c | 119
-rw-r--r--  drivers/hwmon/pmbus/mpq7932.c | 4
-rw-r--r--  drivers/hwmon/pmbus/mpq8785.c | 91
-rw-r--r--  drivers/hwmon/pmbus/pmbus.h | 19
-rw-r--r--  drivers/hwmon/pmbus/pmbus_core.c | 69
-rw-r--r--  drivers/hwmon/pmbus/tda38640.c | 2
-rw-r--r--  drivers/hwmon/pmbus/tps25990.c | 2
-rw-r--r--  drivers/hwmon/pmbus/ucd9000.c | 16
-rw-r--r--  drivers/hwmon/pwm-fan.c | 4
-rw-r--r--  drivers/hwmon/qnap-mcu-hwmon.c | 1
-rw-r--r--  drivers/hwmon/spd5118.c | 357
-rw-r--r--  drivers/hwmon/tmp102.c | 5
-rw-r--r--  drivers/hwmon/xgene-hwmon.c | 39
-rw-r--r--  drivers/i3c/master/Kconfig | 4
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/core.c | 32
-rw-r--r--  drivers/i3c/master/svc-i3c-master.c | 109
-rw-r--r--  drivers/iio/adc/qcom-spmi-rradc.c | 4
-rw-r--r--  drivers/infiniband/core/cm.c | 78
-rw-r--r--  drivers/infiniband/core/cm_trace.h | 2
-rw-r--r--  drivers/infiniband/core/cma.c | 25
-rw-r--r--  drivers/infiniband/core/cma_trace.h | 2
-rw-r--r--  drivers/infiniband/core/iwcm.c | 29
-rw-r--r--  drivers/infiniband/core/mad_rmpp.c | 2
-rw-r--r--  drivers/infiniband/core/umem_odp.c | 271
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 2
-rw-r--r--  drivers/infiniband/core/verbs.c | 2
-rw-r--r--  drivers/infiniband/hw/bnxt_re/debugfs.c | 20
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.c | 2
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_sp.c | 7
-rw-r--r--  drivers/infiniband/hw/hfi1/mad.h | 1
-rw-r--r--  drivers/infiniband/hw/hfi1/pio.c | 10
-rw-r--r--  drivers/infiniband/hw/hfi1/pio.h | 1
-rw-r--r--  drivers/infiniband/hw/hfi1/sdma.c | 18
-rw-r--r--  drivers/infiniband/hw/hfi1/sdma.h | 1
-rw-r--r--  drivers/infiniband/hw/hfi1/user_exp_rcv.c | 2
-rw-r--r--  drivers/infiniband/hw/hns/Makefile | 1
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_ah.c | 1
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_device.h | 20
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 26
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 1
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_main.c | 1
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_mr.c | 3
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_restrack.c | 1
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_trace.h | 216
-rw-r--r--  drivers/infiniband/hw/irdma/ctrl.c | 2
-rw-r--r--  drivers/infiniband/hw/irdma/pble.c | 2
-rw-r--r--  drivers/infiniband/hw/mana/cq.c | 4
-rw-r--r--  drivers/infiniband/hw/mana/device.c | 174
-rw-r--r--  drivers/infiniband/hw/mana/main.c | 92
-rw-r--r--  drivers/infiniband/hw/mana/mana_ib.h | 7
-rw-r--r--  drivers/infiniband/hw/mana/mr.c | 29
-rw-r--r--  drivers/infiniband/hw/mana/qp.c | 5
-rw-r--r--  drivers/infiniband/hw/mlx4/mcg.c | 8
-rw-r--r--  drivers/infiniband/hw/mlx5/fs.c | 58
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 29
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h | 13
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 6
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c | 65
-rw-r--r--  drivers/infiniband/hw/mlx5/qpc.c | 30
-rw-r--r--  drivers/infiniband/hw/mlx5/umr.c | 12
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mr.c | 2
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_uiom.c | 2
-rw-r--r--  drivers/infiniband/sw/rxe/Kconfig | 2
-rw-r--r--  drivers/infiniband/sw/rxe/rxe.c | 2
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_loc.h | 29
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_mr.c | 66
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_odp.c | 144
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_param.h | 5
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_qp.c | 7
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_resp.c | 15
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_task.c | 40
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_task.h | 2
-rw-r--r--  drivers/infiniband/sw/siw/siw.h | 2
-rw-r--r--  drivers/infiniband/sw/siw/siw_cq.c | 2
-rw-r--r--  drivers/infiniband/sw/siw/siw_mem.c | 28
-rw-r--r--  drivers/infiniband/sw/siw/siw_mem.h | 1
-rw-r--r--  drivers/infiniband/sw/siw/siw_qp_rx.c | 8
-rw-r--r--  drivers/infiniband/sw/siw/siw_verbs.c | 8
-rw-r--r--  drivers/input/joystick/xpad.c | 53
-rw-r--r--  drivers/input/keyboard/atkbd.c | 2
-rw-r--r--  drivers/input/keyboard/gpio_keys.c | 6
-rw-r--r--  drivers/input/keyboard/matrix_keypad.c | 30
-rw-r--r--  drivers/input/keyboard/snvs_pwrkey.c | 25
-rw-r--r--  drivers/input/misc/ims-pcu.c | 6
-rw-r--r--  drivers/iommu/Kconfig | 158
-rw-r--r--  drivers/iommu/Makefile | 6
-rw-r--r--  drivers/iommu/amd/Makefile | 2
-rw-r--r--  drivers/iommu/amd/amd_iommu.h | 2
-rw-r--r--  drivers/iommu/amd/amd_iommu_types.h | 10
-rw-r--r--  drivers/iommu/amd/init.c | 94
-rw-r--r--  drivers/iommu/amd/io_pgtable.c | 38
-rw-r--r--  drivers/iommu/amd/io_pgtable_v2.c | 12
-rw-r--r--  drivers/iommu/amd/iommu.c | 94
-rw-r--r--  drivers/iommu/amd/ppr.c | 2
-rw-r--r--  drivers/iommu/apple-dart.c | 3
-rw-r--r--  drivers/iommu/arm/Kconfig | 144
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c | 86
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 138
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 39
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c | 9
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c | 44
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu.c | 6
-rw-r--r--  drivers/iommu/dma-iommu.c | 11
-rw-r--r--  drivers/iommu/exynos-iommu.c | 12
-rw-r--r--  drivers/iommu/fsl_pamu_domain.c | 2
-rw-r--r--  drivers/iommu/intel/Makefile | 7
-rw-r--r--  drivers/iommu/intel/dmar.c | 14
-rw-r--r--  drivers/iommu/intel/iommu.c | 244
-rw-r--r--  drivers/iommu/intel/iommu.h | 62
-rw-r--r--  drivers/iommu/intel/irq_remapping.c | 12
-rw-r--r--  drivers/iommu/intel/nested.c | 20
-rw-r--r--  drivers/iommu/intel/pasid.c | 13
-rw-r--r--  drivers/iommu/intel/pasid.h | 1
-rw-r--r--  drivers/iommu/intel/prq.c | 7
-rw-r--r--  drivers/iommu/intel/svm.c | 9
-rw-r--r--  drivers/iommu/io-pgtable-arm.c | 58
-rw-r--r--  drivers/iommu/io-pgtable-dart.c | 23
-rw-r--r--  drivers/iommu/iommu-pages.c | 119
-rw-r--r--  drivers/iommu/iommu-pages.h | 195
-rw-r--r--  drivers/iommu/iommu-sva.c | 18
-rw-r--r--  drivers/iommu/iommu.c | 107
-rw-r--r--  drivers/iommu/iommufd/device.c | 59
-rw-r--r--  drivers/iommu/iommufd/eventq.c | 48
-rw-r--r--  drivers/iommu/iommufd/iommufd_private.h | 6
-rw-r--r--  drivers/iommu/iommufd/selftest.c | 57
-rw-r--r--  drivers/iommu/ipmmu-vmsa.c | 3
-rw-r--r--  drivers/iommu/mtk_iommu.c | 37
-rw-r--r--  drivers/iommu/riscv/Makefile | 2
-rw-r--r--  drivers/iommu/riscv/iommu.c | 43
-rw-r--r--  drivers/iommu/rockchip-iommu.c | 14
-rw-r--r--  drivers/iommu/s390-iommu.c | 345
-rw-r--r--  drivers/iommu/sun50i-iommu.c | 6
-rw-r--r--  drivers/iommu/tegra-smmu.c | 111
-rw-r--r--  drivers/iommu/virtio-iommu.c | 187
-rw-r--r--  drivers/leds/.kunitconfig | 4
-rw-r--r--  drivers/leds/Kconfig | 11
-rw-r--r--  drivers/leds/Makefile | 1
-rw-r--r--  drivers/leds/blink/leds-lgm-sso.c | 6
-rw-r--r--  drivers/leds/flash/Kconfig | 11
-rw-r--r--  drivers/leds/flash/Makefile | 1
-rw-r--r--  drivers/leds/flash/leds-tps6131x.c | 815
-rw-r--r--  drivers/leds/led-class-flash.c | 15
-rw-r--r--  drivers/leds/led-class-multicolor.c | 3
-rw-r--r--  drivers/leds/led-core.c | 43
-rw-r--r--  drivers/leds/led-test.c | 132
-rw-r--r--  drivers/leds/led-triggers.c | 13
-rw-r--r--  drivers/leds/leds-cros_ec.c | 21
-rw-r--r--  drivers/leds/leds-lp8860.c | 214
-rw-r--r--  drivers/leds/leds-pca9532.c | 11
-rw-r--r--  drivers/leds/leds-pca955x.c | 28
-rw-r--r--  drivers/leds/leds-pca995x.c | 2
-rw-r--r--  drivers/leds/leds-tca6507.c | 11
-rw-r--r--  drivers/leds/leds-turris-omnia.c | 4
-rw-r--r--  drivers/leds/rgb/leds-mt6370-rgb.c | 16
-rw-r--r--  drivers/leds/rgb/leds-ncp5623.c | 5
-rw-r--r--  drivers/leds/rgb/leds-pwm-multicolor.c | 7
-rw-r--r--  drivers/leds/trigger/ledtrig-backlight.c | 48
-rw-r--r--  drivers/mailbox/Kconfig | 14
-rw-r--r--  drivers/mailbox/Makefile | 2
-rw-r--r--  drivers/mailbox/cv1800-mailbox.c | 220
-rw-r--r--  drivers/mailbox/imx-mailbox.c | 21
-rw-r--r--  drivers/mailbox/mailbox.c | 199
-rw-r--r--  drivers/mailbox/mtk-cmdq-mailbox.c | 51
-rw-r--r--  drivers/mailbox/qcom-apcs-ipc-mailbox.c | 16
-rw-r--r--  drivers/md/bcache/btree.c | 3
-rw-r--r--  drivers/md/dm-bufio.c | 189
-rw-r--r--  drivers/md/dm-core.h | 4
-rw-r--r--  drivers/md/dm-delay.c | 17
-rw-r--r--  drivers/md/dm-dust.c | 4
-rw-r--r--  drivers/md/dm-ebs-target.c | 3
-rw-r--r--  drivers/md/dm-flakey.c | 118
-rw-r--r--  drivers/md/dm-ioctl.c | 1
-rw-r--r--  drivers/md/dm-linear.c | 4
-rw-r--r--  drivers/md/dm-log-writes.c | 4
-rw-r--r--  drivers/md/dm-mpath.c | 243
-rw-r--r--  drivers/md/dm-raid1.c | 5
-rw-r--r--  drivers/md/dm-rq.c | 4
-rw-r--r--  drivers/md/dm-stripe.c | 5
-rw-r--r--  drivers/md/dm-switch.c | 4
-rw-r--r--  drivers/md/dm-table.c | 263
-rw-r--r--  drivers/md/dm-vdo/indexer/volume.c | 24
-rw-r--r--  drivers/md/dm-verity-fec.c | 4
-rw-r--r--  drivers/md/dm-verity-target.c | 15
-rw-r--r--  drivers/md/dm-verity-verify-sig.c | 17
-rw-r--r--  drivers/md/dm-zone.c | 98
-rw-r--r--  drivers/md/dm-zoned-target.c | 3
-rw-r--r--  drivers/md/dm.c | 73
-rw-r--r--  drivers/md/dm.h | 6
-rw-r--r--  drivers/memory/Kconfig | 23
-rw-r--r--  drivers/memory/Makefile | 1
-rw-r--r--  drivers/memory/bt1-l2-ctl.c | 2
-rw-r--r--  drivers/memory/mtk-smi.c | 52
-rw-r--r--  drivers/memory/omap-gpmc.c | 15
-rw-r--r--  drivers/memory/stm32_omm.c | 479
-rw-r--r--  drivers/memory/tegra/Kconfig | 8
-rw-r--r--  drivers/mfd/88pm886.c | 14
-rw-r--r--  drivers/mfd/Kconfig | 35
-rw-r--r--  drivers/mfd/Makefile | 5
-rw-r--r--  drivers/mfd/aat2870-core.c | 4
-rw-r--r--  drivers/mfd/as3722.c | 4
-rw-r--r--  drivers/mfd/bcm590xx.c | 66
-rw-r--r--  drivers/mfd/exynos-lpass.c | 31
-rw-r--r--  drivers/mfd/max14577.c | 1
-rw-r--r--  drivers/mfd/max77541.c | 2
-rw-r--r--  drivers/mfd/max77705.c | 4
-rw-r--r--  drivers/mfd/max8925-i2c.c | 1
-rw-r--r--  drivers/mfd/rohm-bd96801.c | 565
-rw-r--r--  drivers/mfd/rt5033.c | 6
-rw-r--r--  drivers/mfd/sec-acpm.c | 442
-rw-r--r--  drivers/mfd/sec-common.c | 301
-rw-r--r--  drivers/mfd/sec-core.c | 481
-rw-r--r--  drivers/mfd/sec-core.h | 23
-rw-r--r--  drivers/mfd/sec-i2c.c | 239
-rw-r--r--  drivers/mfd/sec-irq.c | 460
-rw-r--r--  drivers/mfd/sm501.c | 50
-rw-r--r--  drivers/mfd/sprd-sc27xx-spi.c | 5
-rw-r--r--  drivers/mfd/stm32-lptimer.c | 33
-rw-r--r--  drivers/mfd/stmpe-spi.c | 2
-rw-r--r--  drivers/mfd/tps65010.c | 9
-rw-r--r--  drivers/mfd/ucb1x00-core.c | 7
-rw-r--r--  drivers/misc/uacce/uacce.c | 40
-rw-r--r--  drivers/mtd/devices/Kconfig | 2
-rw-r--r--  drivers/mtd/mtdchar.c | 2
-rw-r--r--  drivers/mtd/mtdcore.c | 152
-rw-r--r--  drivers/mtd/mtdcore.h | 2
-rw-r--r--  drivers/mtd/mtdpart.c | 16
-rw-r--r--  drivers/mtd/nand/ecc-mxic.c | 2
-rw-r--r--  drivers/mtd/nand/qpic_common.c | 8
-rw-r--r--  drivers/mtd/nand/raw/Kconfig | 9
-rw-r--r--  drivers/mtd/nand/raw/Makefile | 1
-rw-r--r--  drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c | 5
-rw-r--r--  drivers/mtd/nand/raw/brcmnand/brcmnand.c | 248
-rw-r--r--  drivers/mtd/nand/raw/denali_pci.c | 13
-rw-r--r--  drivers/mtd/nand/raw/loongson1-nand-controller.c | 836
-rw-r--r--  drivers/mtd/nand/raw/qcom_nandc.c | 18
-rw-r--r--  drivers/mtd/nand/raw/sunxi_nand.c | 2
-rw-r--r--  drivers/mtd/nand/spi/alliancememory.c | 20
-rw-r--r--  drivers/mtd/nand/spi/ato.c | 14
-rw-r--r--  drivers/mtd/nand/spi/core.c | 20
-rw-r--r--  drivers/mtd/nand/spi/esmt.c | 22
-rw-r--r--  drivers/mtd/nand/spi/foresee.c | 16
-rw-r--r--  drivers/mtd/nand/spi/gigadevice.c | 60
-rw-r--r--  drivers/mtd/nand/spi/macronix.c | 20
-rw-r--r--  drivers/mtd/nand/spi/micron.c | 38
-rw-r--r--  drivers/mtd/nand/spi/paragon.c | 20
-rw-r--r--  drivers/mtd/nand/spi/skyhigh.c | 20
-rw-r--r--  drivers/mtd/nand/spi/toshiba.c | 22
-rw-r--r--  drivers/mtd/nand/spi/winbond.c | 128
-rw-r--r--  drivers/mtd/nand/spi/xtx.c | 20
-rw-r--r--  drivers/mtd/spi-nor/macronix.c | 73
-rw-r--r--  drivers/net/ethernet/microsoft/mana/gdma_main.c | 27
-rw-r--r--  drivers/net/ethernet/microsoft/mana/hw_channel.c | 19
-rw-r--r--  drivers/net/ethernet/microsoft/mana/mana_en.c | 108
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/d3.c | 2
-rw-r--r--  drivers/nvme/target/Kconfig | 2
-rw-r--r--  drivers/of/fdt.c | 34
-rw-r--r--  drivers/of/kexec.c | 42
-rw-r--r--  drivers/pci/controller/pci-hyperv.c | 99
-rw-r--r--  drivers/platform/cznic/Kconfig | 17
-rw-r--r--  drivers/platform/cznic/Makefile | 3
-rw-r--r--  drivers/platform/cznic/turris-omnia-mcu-base.c | 4
-rw-r--r--  drivers/platform/cznic/turris-omnia-mcu-gpio.c | 21
-rw-r--r--  drivers/platform/cznic/turris-omnia-mcu-keyctl.c | 162
-rw-r--r--  drivers/platform/cznic/turris-omnia-mcu-trng.c | 17
-rw-r--r--  drivers/platform/cznic/turris-omnia-mcu.h | 33
-rw-r--r--  drivers/platform/cznic/turris-signing-key.c | 193
-rw-r--r--  drivers/power/supply/qcom_pmi8998_charger.c | 4
-rw-r--r--  drivers/pwm/pwm-stm32-lp.c | 219
-rw-r--r--  drivers/rapidio/devices/rio_mport_cdev.c | 20
-rw-r--r--  drivers/rapidio/rio.c | 103
-rw-r--r--  drivers/rapidio/rio.h | 2
-rw-r--r--  drivers/rapidio/rio_cm.c | 6
-rw-r--r--  drivers/regulator/bcm590xx-regulator.c | 1289
-rw-r--r--  drivers/regulator/bd96801-regulator.c | 455
-rw-r--r--  drivers/remoteproc/Makefile | 6
-rw-r--r--  drivers/remoteproc/imx_dsp_rproc.c | 98
-rw-r--r--  drivers/remoteproc/qcom_wcnss_iris.c | 2
-rw-r--r--  drivers/remoteproc/remoteproc_core.c | 7
-rw-r--r--  drivers/remoteproc/stm32_rproc.c | 8
-rw-r--r--  drivers/remoteproc/ti_k3_common.c | 551
-rw-r--r--  drivers/remoteproc/ti_k3_common.h | 118
-rw-r--r--  drivers/remoteproc/ti_k3_dsp_remoteproc.c | 616
-rw-r--r--  drivers/remoteproc/ti_k3_m4_remoteproc.c | 583
-rw-r--r--  drivers/remoteproc/ti_k3_r5_remoteproc.c | 1018
-rw-r--r--  drivers/remoteproc/xlnx_r5_remoteproc.c | 34
-rw-r--r--  drivers/reset/Kconfig | 17
-rw-r--r--  drivers/reset/Makefile | 2
-rw-r--r--  drivers/reset/reset-rzv2h-usb2phy.c | 236
-rw-r--r--  drivers/reset/reset-th1520.c | 135
-rw-r--r--  drivers/rpmsg/qcom_smd.c | 10
-rw-r--r--  drivers/rpmsg/rpmsg_core.c | 63
-rw-r--r--  drivers/rpmsg/rpmsg_internal.h | 6
-rw-r--r--  drivers/rpmsg/virtio_rpmsg_bus.c | 24
-rw-r--r--  drivers/s390/char/vmlogrdr.c | 4
-rw-r--r--  drivers/soc/Kconfig | 2
-rw-r--r--  drivers/soc/Makefile | 2
-rw-r--r--  drivers/soc/amlogic/meson-clk-measure.c | 461
-rw-r--r--  drivers/soc/aspeed/aspeed-lpc-snoop.c | 17
-rw-r--r--  drivers/soc/fsl/Kconfig | 2
-rw-r--r--  drivers/soc/fsl/qbman/qman.c | 2
-rw-r--r--  drivers/soc/fsl/qe/qe_ic.c | 15
-rw-r--r--  drivers/soc/hisilicon/kunpeng_hccs.c | 42
-rw-r--r--  drivers/soc/hisilicon/kunpeng_hccs.h | 2
-rw-r--r--  drivers/soc/imx/soc-imx8m.c | 177
-rw-r--r--  drivers/soc/mediatek/mtk-dvfsrc.c | 53
-rw-r--r--  drivers/soc/qcom/llcc-qcom.c | 497
-rw-r--r--  drivers/soc/qcom/pmic_glink.c | 4
-rw-r--r--  drivers/soc/qcom/pmic_glink_altmode.c | 30
-rw-r--r--  drivers/soc/qcom/qcom_pd_mapper.c | 11
-rw-r--r--  drivers/soc/qcom/smem.c | 2
-rw-r--r--  drivers/soc/qcom/smp2p.c | 2
-rw-r--r--  drivers/soc/qcom/socinfo.c | 1
-rw-r--r--  drivers/soc/renesas/Kconfig | 11
-rw-r--r--  drivers/soc/renesas/Makefile | 1
-rw-r--r--  drivers/soc/renesas/r9a09g056-sys.c | 75
-rw-r--r--  drivers/soc/renesas/rz-sysc.c | 3
-rw-r--r--  drivers/soc/renesas/rz-sysc.h | 1
-rw-r--r--  drivers/soc/samsung/exynos-pmu.c | 78
-rw-r--r--  drivers/soc/samsung/exynos-pmu.h | 1
-rw-r--r--  drivers/soc/sophgo/Kconfig | 34
-rw-r--r--  drivers/soc/sophgo/Makefile | 4
-rw-r--r--  drivers/soc/sophgo/cv1800-rtcsys.c | 63
-rw-r--r--  drivers/soc/sophgo/sg2044-topsys.c | 45
-rw-r--r--  drivers/soc/ti/k3-ringacc.c | 2
-rw-r--r--  drivers/soc/ti/k3-socinfo.c | 2
-rw-r--r--  drivers/soc/ti/knav_qmss_queue.c | 3
-rw-r--r--  drivers/soc/ti/wkup_m3_ipc.c | 8
-rw-r--r--  drivers/soc/vt8500/Kconfig | 19
-rw-r--r--  drivers/soc/vt8500/Makefile | 2
-rw-r--r--  drivers/soc/vt8500/wmt-socinfo.c | 125
-rw-r--r--  drivers/spi/spi-qpic-snand.c | 1
-rw-r--r--  drivers/tee/amdtee/core.c | 16
-rw-r--r--  drivers/tee/optee/smc_abi.c | 3
-rw-r--r--  drivers/tee/tee_core.c | 11
-rw-r--r--  drivers/uio/uio_hv_generic.c | 7
-rw-r--r--  drivers/usb/gadget/function/f_hid.c | 12
-rw-r--r--  drivers/usb/typec/tcpm/tcpm.c | 6
-rw-r--r--  drivers/video/backlight/backlight.c | 93
-rw-r--r--  drivers/video/backlight/lcd.c | 108
-rw-r--r--  drivers/video/backlight/qcom-wled.c | 6
-rw-r--r--  drivers/video/console/vgacon.c | 2
-rw-r--r--  drivers/video/fbdev/arkfb.c | 5
-rw-r--r--  drivers/video/fbdev/carminefb.c | 8
-rw-r--r--  drivers/video/fbdev/carminefb.h | 2
-rw-r--r--  drivers/video/fbdev/core/fb_backlight.c | 12
-rw-r--r--  drivers/video/fbdev/core/fb_info.c | 1
-rw-r--r--  drivers/video/fbdev/core/fbcon.c | 7
-rw-r--r--  drivers/video/fbdev/core/fbcvt.c | 2
-rw-r--r--  drivers/video/fbdev/core/fbmem.c | 104
-rw-r--r--  drivers/video/fbdev/core/fbsysfs.c | 8
-rw-r--r--  drivers/video/fbdev/nvidia/nvidia.c | 2
-rw-r--r--  drivers/video/fbdev/via/via-gpio.c | 10
-rw-r--r--  drivers/watchdog/Kconfig | 26
-rw-r--r--  drivers/watchdog/Makefile | 2
-rw-r--r--  drivers/watchdog/apple_wdt.c | 7
-rw-r--r--  drivers/watchdog/arm_smc_wdt.c | 17
-rw-r--r--  drivers/watchdog/cros_ec_wdt.c | 30
-rw-r--r--  drivers/watchdog/da9052_wdt.c | 27
-rw-r--r--  drivers/watchdog/iTCO_wdt.c | 25
-rw-r--r--  drivers/watchdog/intel_oc_wdt.c | 233
-rw-r--r--  drivers/watchdog/lenovo_se30_wdt.c | 2
-rw-r--r--  drivers/watchdog/pcwd_usb.c | 6
-rw-r--r--  drivers/watchdog/pretimeout_noop.c | 2
-rw-r--r--  drivers/watchdog/pretimeout_panic.c | 2
-rw-r--r--  drivers/watchdog/qcom-wdt.c | 7
-rw-r--r--  drivers/watchdog/s32g_wdt.c | 315
-rw-r--r--  drivers/watchdog/s3c2410_wdt.c | 39
-rw-r--r--  drivers/watchdog/stm32_iwdg.c | 2
-rw-r--r--  drivers/watchdog/wdt_pci.c | 2
514 files changed, 23062 insertions(+), 8485 deletions(-)
diff --git a/drivers/accel/amdxdna/aie2_pci.c b/drivers/accel/amdxdna/aie2_pci.c
index 5a058e565b01..c6cf7068d23c 100644
--- a/drivers/accel/amdxdna/aie2_pci.c
+++ b/drivers/accel/amdxdna/aie2_pci.c
@@ -512,12 +512,6 @@ static int aie2_init(struct amdxdna_dev *xdna)
goto release_fw;
}
- ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
- if (ret) {
- XDNA_ERR(xdna, "Enable PASID failed, ret %d", ret);
- goto free_irq;
- }
-
psp_conf.fw_size = fw->size;
psp_conf.fw_buf = fw->data;
for (i = 0; i < PSP_MAX_REGS; i++)
@@ -526,14 +520,14 @@ static int aie2_init(struct amdxdna_dev *xdna)
if (!ndev->psp_hdl) {
XDNA_ERR(xdna, "failed to create psp");
ret = -ENOMEM;
- goto disable_sva;
+ goto free_irq;
}
xdna->dev_handle = ndev;
ret = aie2_hw_start(xdna);
if (ret) {
XDNA_ERR(xdna, "start npu failed, ret %d", ret);
- goto disable_sva;
+ goto free_irq;
}
ret = aie2_mgmt_fw_query(ndev);
@@ -584,8 +578,6 @@ async_event_free:
aie2_error_async_events_free(ndev);
stop_hw:
aie2_hw_stop(xdna);
-disable_sva:
- iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
free_irq:
pci_free_irq_vectors(pdev);
release_fw:
@@ -601,7 +593,6 @@ static void aie2_fini(struct amdxdna_dev *xdna)
aie2_hw_stop(xdna);
aie2_error_async_events_free(ndev);
- iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
pci_free_irq_vectors(pdev);
}
diff --git a/drivers/acpi/acpi_mrrm.c b/drivers/acpi/acpi_mrrm.c
index 26c1a4e6b6ec..47ea3ccc2142 100644
--- a/drivers/acpi/acpi_mrrm.c
+++ b/drivers/acpi/acpi_mrrm.c
@@ -157,8 +157,10 @@ static __init int add_boot_memory_ranges(void)
for (int i = 0; i < mrrm_mem_entry_num; i++) {
name = kasprintf(GFP_KERNEL, "range%d", i);
- if (!name)
+ if (!name) {
+ ret = -ENOMEM;
break;
+ }
kobj = kobject_create_and_add(name, pkobj);
diff --git a/drivers/acpi/acpica/utnonansi.c b/drivers/acpi/acpica/utnonansi.c
index 803e3e893825..ff0802ace19b 100644
--- a/drivers/acpi/acpica/utnonansi.c
+++ b/drivers/acpi/acpica/utnonansi.c
@@ -168,7 +168,7 @@ void acpi_ut_safe_strncpy(char *dest, char *source, acpi_size dest_size)
{
/* Always terminate destination string */
- memcpy(dest, source, dest_size);
+ strncpy(dest, source, dest_size);
dest[dest_size - 1] = 0;
}
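The swap fixes an over-read rather than a copy bug: memcpy() unconditionally reads dest_size bytes from the source buffer, walking past the end of any source string shorter than the destination, while strncpy() stops at the source NUL. A minimal user-space sketch of the hazard:

    #include <string.h>

    void safe_copy(char *dest, const char *source, size_t dest_size)
    {
            /*
             * memcpy(dest, source, dest_size) would read dest_size bytes
             * from source even when source is shorter -- an out-of-bounds
             * read. strncpy() reads at most strlen(source) + 1 bytes and
             * zero-fills the remainder of dest.
             */
            strncpy(dest, source, dest_size);
            dest[dest_size - 1] = 0;        /* always terminate, as above */
    }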
diff --git a/drivers/acpi/apei/einj-core.c b/drivers/acpi/apei/einj-core.c
index ca3484dac5c4..fea11a35eea3 100644
--- a/drivers/acpi/apei/einj-core.c
+++ b/drivers/acpi/apei/einj-core.c
@@ -766,7 +766,7 @@ static int __init einj_probe(struct faux_device *fdev)
rc = einj_get_available_error_type(&available_error_type);
if (rc)
- return rc;
+ goto err_put_table;
rc = -ENOMEM;
einj_debug_dir = debugfs_create_dir("einj", apei_get_debugfs_dir());
diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c
index 1687483ff319..76a856c32c4d 100644
--- a/drivers/acpi/irq.c
+++ b/drivers/acpi/irq.c
@@ -12,7 +12,7 @@
enum acpi_irq_model_id acpi_irq_model;
-static struct fwnode_handle *(*acpi_get_gsi_domain_id)(u32 gsi);
+static acpi_gsi_domain_disp_fn acpi_get_gsi_domain_id;
static u32 (*acpi_gsi_to_irq_fallback)(u32 gsi);
/**
@@ -307,12 +307,24 @@ EXPORT_SYMBOL_GPL(acpi_irq_get);
* for a given GSI
*/
void __init acpi_set_irq_model(enum acpi_irq_model_id model,
- struct fwnode_handle *(*fn)(u32))
+ acpi_gsi_domain_disp_fn fn)
{
acpi_irq_model = model;
acpi_get_gsi_domain_id = fn;
}
+/**
+ * acpi_get_gsi_dispatcher() - Get the GSI dispatcher function
+ *
+ * Return the dispatcher function that computes the domain fwnode for
+ * a given GSI.
+ */
+acpi_gsi_domain_disp_fn acpi_get_gsi_dispatcher(void)
+{
+ return acpi_get_gsi_domain_id;
+}
+EXPORT_SYMBOL_GPL(acpi_get_gsi_dispatcher);
+
/**
* acpi_set_gsi_to_irq_fallback - Register a GSI transfer
* callback to fallback to arch specified implementation.
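A hypothetical caller of the new accessor could resolve the IRQ domain for a GSI as below; example_gsi_to_domain() is illustrative only, and acpi_gsi_domain_disp_fn is the typedef introduced alongside this change:

    /* Sketch: map a GSI to its IRQ domain via the registered dispatcher. */
    static struct irq_domain *example_gsi_to_domain(u32 gsi)
    {
            acpi_gsi_domain_disp_fn disp = acpi_get_gsi_dispatcher();
            struct fwnode_handle *fwnode;

            if (!disp)
                    return NULL;

            fwnode = disp(gsi);
            return irq_find_matching_fwnode(fwnode, DOMAIN_BUS_ANY);
    }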
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index 0a725e46d017..53816dfab645 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -14,6 +14,7 @@
#include <linux/errno.h>
#include <linux/acpi.h>
#include <linux/memblock.h>
+#include <linux/memory.h>
#include <linux/numa.h>
#include <linux/nodemask.h>
#include <linux/topology.h>
@@ -429,13 +430,23 @@ static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
{
struct acpi_cedt_cfmws *cfmws;
int *fake_pxm = arg;
- u64 start, end;
+ u64 start, end, align;
int node;
+ int err;
cfmws = (struct acpi_cedt_cfmws *)header;
start = cfmws->base_hpa;
end = cfmws->base_hpa + cfmws->window_size;
+ /* Align memblock size to CFMW regions if possible */
+ align = 1UL << __ffs(start | end);
+ if (align >= SZ_256M) {
+ err = memory_block_advise_max_size(align);
+ if (err)
+ pr_warn("CFMWS: memblock size advise failed (%d)\n", err);
+ } else {
+ pr_err("CFMWS: [BIOS BUG] base/size alignment violates spec\n");
+ }
+
/*
* The SRAT may have already described NUMA details for all,
* or a portion of, this CFMWS HPA range. Extend the memblks
@@ -453,7 +464,7 @@ static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
return -EINVAL;
}
- if (numa_add_memblk(node, start, end) < 0) {
+ if (numa_add_reserved_memblk(node, start, end) < 0) {
/* CXL driver must handle the NUMA_NO_NODE case */
pr_warn("ACPI NUMA: Failed to add memblk for CFMWS node %d [mem %#llx-%#llx]\n",
node, start, end);
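The alignment computed above is the largest power of two that divides both the window base and its end. For example, base_hpa = 0x100000000 (4 GiB) with window_size = 0x40000000 (1 GiB) gives end = 0x140000000; start | end = 0x140000000, whose lowest set bit is bit 30, so align = 1 GiB and the SZ_256M threshold is met. The same arithmetic as a standalone sketch:

    #include <stdint.h>

    /* Largest power of two dividing both start and end; equivalent to
     * 1UL << __ffs(start | end). Assumes start | end != 0. */
    static uint64_t cfmws_align(uint64_t start, uint64_t end)
    {
            uint64_t v = start | end;

            return v & -v;  /* isolate the lowest set bit */
    }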
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 773799cfd443..79b20da0a256 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -6682,12 +6682,6 @@ const struct ata_port_info ata_dummy_port_info = {
};
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
-void ata_print_version(const struct device *dev, const char *version)
-{
- dev_printk(KERN_DEBUG, dev, "version %s\n", version);
-}
-EXPORT_SYMBOL(ata_print_version);
-
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_tf_load);
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_exec_command);
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_setup);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index b990c1ee0b12..c11d8e634bf7 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -3432,7 +3432,7 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
struct ata_eh_context *ehc = &link->eh_context;
struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
enum ata_lpm_policy old_policy = link->lpm_policy;
- bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM;
+ bool host_has_dipm = !(link->ap->flags & ATA_FLAG_NO_DIPM);
unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
unsigned int err_mask;
int rc;
@@ -3443,28 +3443,35 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
return 0;
/*
- * DIPM is enabled only for MIN_POWER as some devices
- * misbehave when the host NACKs transition to SLUMBER. Order
- * device and link configurations such that the host always
- * allows DIPM requests.
+ * This function currently assumes that it will never be supplied policy
+ * ATA_LPM_UNKNOWN.
+ */
+ if (WARN_ON_ONCE(policy == ATA_LPM_UNKNOWN))
+ return 0;
+
+ /*
+ * DIPM is enabled only for ATA_LPM_MIN_POWER,
+ * ATA_LPM_MIN_POWER_WITH_PARTIAL, and ATA_LPM_MED_POWER_WITH_DIPM, as
+ * some devices misbehave when the host NACKs transition to SLUMBER.
*/
ata_for_each_dev(dev, link, ENABLED) {
- bool hipm = ata_id_has_hipm(dev->id);
- bool dipm = ata_id_has_dipm(dev->id) && !no_dipm;
+ bool dev_has_hipm = ata_id_has_hipm(dev->id);
+ bool dev_has_dipm = ata_id_has_dipm(dev->id);
/* find the first enabled and LPM enabled devices */
if (!link_dev)
link_dev = dev;
- if (!lpm_dev && (hipm || dipm))
+ if (!lpm_dev &&
+ (dev_has_hipm || (dev_has_dipm && host_has_dipm)))
lpm_dev = dev;
hints &= ~ATA_LPM_EMPTY;
- if (!hipm)
+ if (!dev_has_hipm)
hints &= ~ATA_LPM_HIPM;
/* disable DIPM before changing link config */
- if (policy < ATA_LPM_MED_POWER_WITH_DIPM && dipm) {
+ if (dev_has_dipm) {
err_mask = ata_dev_set_feature(dev,
SETFEATURES_SATA_DISABLE, SATA_DIPM);
if (err_mask && err_mask != AC_ERR_DEV) {
@@ -3505,10 +3512,16 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
if (ap && ap->slave_link)
ap->slave_link->lpm_policy = policy;
- /* host config updated, enable DIPM if transitioning to MIN_POWER */
+ /*
+ * Host config updated, enable DIPM if transitioning to
+ * ATA_LPM_MIN_POWER, ATA_LPM_MIN_POWER_WITH_PARTIAL, or
+ * ATA_LPM_MED_POWER_WITH_DIPM.
+ */
ata_for_each_dev(dev, link, ENABLED) {
- if (policy >= ATA_LPM_MED_POWER_WITH_DIPM && !no_dipm &&
- ata_id_has_dipm(dev->id)) {
+ bool dev_has_dipm = ata_id_has_dipm(dev->id);
+
+ if (policy >= ATA_LPM_MED_POWER_WITH_DIPM && host_has_dipm &&
+ dev_has_dipm) {
err_mask = ata_dev_set_feature(dev,
SETFEATURES_SATA_ENABLE, SATA_DIPM);
if (err_mask && err_mask != AC_ERR_DEV) {
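The host_has_dipm/dev_has_dipm renames and the `policy >= ATA_LPM_MED_POWER_WITH_DIPM` tests lean on the ordering of the LPM policy enum; a sketch of the ordering these comparisons assume (mirroring the libata definitions):

    /* Ordering assumed by the ">=" comparisons above (sketch). */
    enum ata_lpm_policy {
            ATA_LPM_UNKNOWN,
            ATA_LPM_MAX_POWER,
            ATA_LPM_MED_POWER,
            ATA_LPM_MED_POWER_WITH_DIPM,    /* DIPM enabled from here on */
            ATA_LPM_MIN_POWER_WITH_PARTIAL,
            ATA_LPM_MIN_POWER,
    };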
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index 2e4463d3a356..cb46ce276bb1 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -1509,9 +1509,10 @@ int ata_eh_get_ncq_success_sense(struct ata_link *link)
struct ata_queued_cmd *qc;
unsigned int err_mask, tag;
u8 *sense, sk = 0, asc = 0, ascq = 0;
- u64 sense_valid, val;
u16 extended_sense;
bool aux_icc_valid;
+ u32 sense_valid;
+ u64 val;
int ret = 0;
err_mask = ata_read_log_page(dev, ATA_LOG_SENSE_NCQ, 0, buf, 2);
@@ -1529,8 +1530,7 @@ int ata_eh_get_ncq_success_sense(struct ata_link *link)
return -EIO;
}
- sense_valid = (u64)buf[8] | ((u64)buf[9] << 8) |
- ((u64)buf[10] << 16) | ((u64)buf[11] << 24);
+ sense_valid = get_unaligned_le32(&buf[8]);
extended_sense = get_unaligned_le16(&buf[14]);
aux_icc_valid = extended_sense & BIT(15);
@@ -1545,7 +1545,7 @@ int ata_eh_get_ncq_success_sense(struct ata_link *link)
* If the command does not have any sense data, clear ATA_SENSE.
* Keep ATA_QCFLAG_EH_SUCCESS_CMD so that command is finished.
*/
- if (!(sense_valid & (1ULL << tag))) {
+ if (!(sense_valid & BIT(tag))) {
qc->result_tf.status &= ~ATA_SENSE;
continue;
}
@@ -1634,7 +1634,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
return;
}
- if (!(link->sactive & (1 << tag))) {
+ if (!(link->sactive & BIT(tag))) {
ata_link_err(link, "log page 10h reported inactive tag %d\n",
tag);
return;
@@ -1659,8 +1659,6 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
if (ata_scsi_sense_is_valid(sense_key, asc, ascq)) {
ata_scsi_set_sense(dev, qc->scsicmd, sense_key, asc,
ascq);
- ata_scsi_set_sense_information(dev, qc->scsicmd,
- &qc->result_tf);
qc->flags |= ATA_QCFLAG_SENSE_VALID;
}
}
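Both cleanups substitute helpers for open-coded forms: get_unaligned_le32() assembles bytes 8..11 exactly as the removed shift chain did, and BIT(n) expands to (1UL << (n)). A sketch of the resulting idiom (kernel context, <linux/unaligned.h> and <linux/bits.h> assumed):

    /* Sketch: test one NCQ tag's bit in the sense-valid bitmap. */
    static bool tag_has_sense(const u8 *buf, unsigned int tag)
    {
            u32 sense_valid = get_unaligned_le32(&buf[8]);  /* bytes 8..11, LE */

            return sense_valid & BIT(tag);  /* BIT(tag) == 1UL << tag */
    }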
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index c0eb8c67a9ff..a21c9895408d 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -216,17 +216,21 @@ void ata_scsi_set_sense(struct ata_device *dev, struct scsi_cmnd *cmd,
scsi_build_sense(cmd, d_sense, sk, asc, ascq);
}
-void ata_scsi_set_sense_information(struct ata_device *dev,
- struct scsi_cmnd *cmd,
- const struct ata_taskfile *tf)
+static void ata_scsi_set_sense_information(struct ata_queued_cmd *qc)
{
u64 information;
- information = ata_tf_read_block(tf, dev);
+ if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
+ ata_dev_dbg(qc->dev,
+ "missing result TF: can't set INFORMATION sense field\n");
+ return;
+ }
+
+ information = ata_tf_read_block(&qc->result_tf, qc->dev);
if (information == U64_MAX)
return;
- scsi_set_sense_information(cmd->sense_buffer,
+ scsi_set_sense_information(qc->scsicmd->sense_buffer,
SCSI_SENSE_BUFFERSIZE, information);
}
@@ -971,8 +975,7 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
* ata_gen_ata_sense - generate a SCSI fixed sense block
* @qc: Command that we are erroring out
*
- * Generate sense block for a failed ATA command @qc. Descriptor
- * format is used to accommodate LBA48 block address.
+ * Generate sense block for a failed ATA command @qc.
*
* LOCKING:
* None.
@@ -982,8 +985,6 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
struct ata_device *dev = qc->dev;
struct scsi_cmnd *cmd = qc->scsicmd;
struct ata_taskfile *tf = &qc->result_tf;
- unsigned char *sb = cmd->sense_buffer;
- u64 block;
u8 sense_key, asc, ascq;
if (ata_dev_disabled(dev)) {
@@ -1014,12 +1015,6 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0);
return;
}
-
- block = ata_tf_read_block(&qc->result_tf, dev);
- if (block == U64_MAX)
- return;
-
- scsi_set_sense_information(sb, SCSI_SENSE_BUFFERSIZE, block);
}
void ata_scsi_sdev_config(struct scsi_device *sdev)
@@ -1679,8 +1674,10 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
ata_scsi_set_passthru_sense_fields(qc);
if (is_ck_cond_request)
set_status_byte(qc->scsicmd, SAM_STAT_CHECK_CONDITION);
- } else if (is_error && !have_sense) {
- ata_gen_ata_sense(qc);
+ } else if (is_error) {
+ if (!have_sense)
+ ata_gen_ata_sense(qc);
+ ata_scsi_set_sense_information(qc);
}
ata_qc_done(qc);
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 0337be4faec7..ce5c628fa6fd 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -141,9 +141,6 @@ extern int ata_scsi_offline_dev(struct ata_device *dev);
extern bool ata_scsi_sense_is_valid(u8 sk, u8 asc, u8 ascq);
extern void ata_scsi_set_sense(struct ata_device *dev,
struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq);
-extern void ata_scsi_set_sense_information(struct ata_device *dev,
- struct scsi_cmnd *cmd,
- const struct ata_taskfile *tf);
extern void ata_scsi_media_change_notify(struct ata_device *dev);
extern void ata_scsi_hotplug(struct work_struct *work);
extern void ata_scsi_dev_rescan(struct work_struct *work);
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index c3042eca6332..f7f5131af937 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -1301,32 +1301,32 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
}
if (dimm_test) {
- u8 test_parttern1[40] =
+ u8 test_pattern1[40] =
{0x55,0xAA,'P','r','o','m','i','s','e',' ',
'N','o','t',' ','Y','e','t',' ',
'D','e','f','i','n','e','d',' ',
'1','.','1','0',
'9','8','0','3','1','6','1','2',0,0};
- u8 test_parttern2[40] = {0};
+ u8 test_pattern2[40] = {0};
- pdc20621_put_to_dimm(host, test_parttern2, 0x10040, 40);
- pdc20621_put_to_dimm(host, test_parttern2, 0x40, 40);
+ pdc20621_put_to_dimm(host, test_pattern2, 0x10040, 40);
+ pdc20621_put_to_dimm(host, test_pattern2, 0x40, 40);
- pdc20621_put_to_dimm(host, test_parttern1, 0x10040, 40);
- pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
- dev_info(host->dev, "DIMM test pattern 1: %x, %x, %s\n", test_parttern2[0],
- test_parttern2[1], &(test_parttern2[2]));
- pdc20621_get_from_dimm(host, test_parttern2, 0x10040,
+ pdc20621_put_to_dimm(host, test_pattern1, 0x10040, 40);
+ pdc20621_get_from_dimm(host, test_pattern2, 0x40, 40);
+ dev_info(host->dev, "DIMM test pattern 1: %x, %x, %s\n", test_pattern2[0],
+ test_pattern2[1], &(test_pattern2[2]));
+ pdc20621_get_from_dimm(host, test_pattern2, 0x10040,
40);
dev_info(host->dev, "DIMM test pattern 2: %x, %x, %s\n",
- test_parttern2[0],
- test_parttern2[1], &(test_parttern2[2]));
+ test_pattern2[0],
+ test_pattern2[1], &(test_pattern2[2]));
- pdc20621_put_to_dimm(host, test_parttern1, 0x40, 40);
- pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
+ pdc20621_put_to_dimm(host, test_pattern1, 0x40, 40);
+ pdc20621_get_from_dimm(host, test_pattern2, 0x40, 40);
dev_info(host->dev, "DIMM test pattern 3: %x, %x, %s\n",
- test_parttern2[0],
- test_parttern2[1], &(test_parttern2[2]));
+ test_pattern2[0],
+ test_pattern2[1], &(test_pattern2[2]));
}
/* ECC initialization. */
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 19469e7f88c2..ed3e69dc785c 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -110,6 +110,57 @@ static void memory_block_release(struct device *dev)
kfree(mem);
}
+
+/* Max block size to be set by memory_block_advise_max_size */
+static unsigned long memory_block_advised_size;
+static bool memory_block_advised_size_queried;
+
+/**
+ * memory_block_advise_max_size() - advise memory hotplug on the max suggested
+ * block size, usually for alignment.
+ * @size: suggestion for the maximum block size. Must be a power of 2.
+ *
+ * Early boot software (pre-allocator init) may advise archs on the max block
+ * size. This value can only decrease after initialization, as the intent is
+ * to identify the largest supported alignment for all sources.
+ *
+ * Use of this value is arch-defined, as is min/max block size.
+ *
+ * Return: 0 on success
+ * -EINVAL if size is 0 or not a power of 2
+ * -EBUSY if value has already been probed
+ */
+int __init memory_block_advise_max_size(unsigned long size)
+{
+ if (!size || !is_power_of_2(size))
+ return -EINVAL;
+
+ if (memory_block_advised_size_queried)
+ return -EBUSY;
+
+ if (memory_block_advised_size)
+ memory_block_advised_size = min(memory_block_advised_size, size);
+ else
+ memory_block_advised_size = size;
+
+ return 0;
+}
+
+/**
+ * memory_block_advised_max_size() - query advised max hotplug block size.
+ *
+ * After the first call, the value can never change. Callers looking for the
+ * actual block size should use memory_block_size_bytes(). This interface is
+ * intended for use by arch-init when initializing the hotplug block size.
+ *
+ * Return: advised size in bytes, or 0 if never set.
+ */
+unsigned long memory_block_advised_max_size(void)
+{
+ memory_block_advised_size_queried = true;
+ return memory_block_advised_size;
+}
+
unsigned long __weak memory_block_size_bytes(void)
{
return MIN_MEMORY_BLOCK_SIZE;
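Typical use is one-shot during early init: boot code advises before the allocator is up, then the arch queries once (permanently locking the value) while picking its hotplug block size. A sketch of the arch side, with MAX_BLOCK_SIZE standing in for an arch-specific default:

    /* Sketch: clamp a hypothetical arch default to the advised maximum. */
    static unsigned long __init probe_memory_block_size(void)
    {
            unsigned long size = MAX_BLOCK_SIZE;    /* hypothetical default */
            unsigned long advised = memory_block_advised_max_size();

            if (advised)
                    size = min(size, advised);

            return max(size, MIN_MEMORY_BLOCK_SIZE);
    }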
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 618712071a1e..c19094481630 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -7,6 +7,7 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memory.h>
+#include <linux/mempolicy.h>
#include <linux/vmstat.h>
#include <linux/notifier.h>
#include <linux/node.h>
@@ -214,6 +215,14 @@ void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,
break;
}
}
+
+ /* When setting CPU access coordinates, update mempolicy */
+ if (access == ACCESS_COORDINATE_CPU) {
+ if (mempolicy_set_node_perf(nid, coord)) {
+ pr_info("failed to set mempolicy attrs for node %d\n",
+ nid);
+ }
+ }
}
EXPORT_SYMBOL_GPL(node_set_perf_attrs);
diff --git a/drivers/base/property.c b/drivers/base/property.c
index c1392743df9c..805f75b35115 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -928,22 +928,22 @@ bool fwnode_device_is_available(const struct fwnode_handle *fwnode)
EXPORT_SYMBOL_GPL(fwnode_device_is_available);
/**
- * device_get_child_node_count - return the number of child nodes for device
- * @dev: Device to count the child nodes for
+ * fwnode_get_child_node_count - return the number of child nodes for a given firmware node
+ * @fwnode: Pointer to the parent firmware node
*
- * Return: the number of child nodes for a given device.
+ * Return: the number of child nodes for a given firmware node.
*/
-unsigned int device_get_child_node_count(const struct device *dev)
+unsigned int fwnode_get_child_node_count(const struct fwnode_handle *fwnode)
{
struct fwnode_handle *child;
unsigned int count = 0;
- device_for_each_child_node(dev, child)
+ fwnode_for_each_child_node(fwnode, child)
count++;
return count;
}
-EXPORT_SYMBOL_GPL(device_get_child_node_count);
+EXPORT_SYMBOL_GPL(fwnode_get_child_node_count);
bool device_dma_supported(const struct device *dev)
{
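With the fwnode-level helper in place, the device-level API can survive as a thin wrapper; a sketch of that arrangement (its actual placement in the headers is an assumption):

    /* Sketch: device_get_child_node_count() as a wrapper (assumption). */
    static inline unsigned int
    device_get_child_node_count(const struct device *dev)
    {
            return fwnode_get_child_node_count(dev_fwnode(dev));
    }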
diff --git a/drivers/block/zram/backend_deflate.c b/drivers/block/zram/backend_deflate.c
index 0f7f252c12f4..b75016e0e654 100644
--- a/drivers/block/zram/backend_deflate.c
+++ b/drivers/block/zram/backend_deflate.c
@@ -8,7 +8,7 @@
#include "backend_deflate.h"
/* Use the same value as crypto API */
-#define DEFLATE_DEF_WINBITS 11
+#define DEFLATE_DEF_WINBITS (-11)
#define DEFLATE_DEF_MEMLEVEL MAX_MEM_LEVEL
struct deflate_ctx {
@@ -22,8 +22,10 @@ static void deflate_release_params(struct zcomp_params *params)
static int deflate_setup_params(struct zcomp_params *params)
{
- if (params->level == ZCOMP_PARAM_NO_LEVEL)
+ if (params->level == ZCOMP_PARAM_NOT_SET)
params->level = Z_DEFAULT_COMPRESSION;
+ if (params->deflate.winbits == ZCOMP_PARAM_NOT_SET)
+ params->deflate.winbits = DEFLATE_DEF_WINBITS;
return 0;
}
@@ -57,13 +59,13 @@ static int deflate_create(struct zcomp_params *params, struct zcomp_ctx *ctx)
return -ENOMEM;
ctx->context = zctx;
- sz = zlib_deflate_workspacesize(-DEFLATE_DEF_WINBITS, MAX_MEM_LEVEL);
+ sz = zlib_deflate_workspacesize(params->deflate.winbits, MAX_MEM_LEVEL);
zctx->cctx.workspace = vzalloc(sz);
if (!zctx->cctx.workspace)
goto error;
ret = zlib_deflateInit2(&zctx->cctx, params->level, Z_DEFLATED,
- -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL,
+ params->deflate.winbits, DEFLATE_DEF_MEMLEVEL,
Z_DEFAULT_STRATEGY);
if (ret != Z_OK)
goto error;
@@ -73,7 +75,7 @@ static int deflate_create(struct zcomp_params *params, struct zcomp_ctx *ctx)
if (!zctx->dctx.workspace)
goto error;
- ret = zlib_inflateInit2(&zctx->dctx, -DEFLATE_DEF_WINBITS);
+ ret = zlib_inflateInit2(&zctx->dctx, params->deflate.winbits);
if (ret != Z_OK)
goto error;
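The sign flip in DEFLATE_DEF_WINBITS is deliberate: zlib treats negative windowBits as a request for raw deflate streams (no zlib header or trailer), which the old call sites achieved by negating the positive define at each call. Storing the default as -11 lets a user-supplied winbits value pass straight through to zlib_deflateInit2()/zlib_inflateInit2(), with the sign selecting raw (negative) versus zlib-wrapped (positive) framing.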
diff --git a/drivers/block/zram/backend_lz4.c b/drivers/block/zram/backend_lz4.c
index 847f3334eb38..daccd60857eb 100644
--- a/drivers/block/zram/backend_lz4.c
+++ b/drivers/block/zram/backend_lz4.c
@@ -18,7 +18,7 @@ static void lz4_release_params(struct zcomp_params *params)
static int lz4_setup_params(struct zcomp_params *params)
{
- if (params->level == ZCOMP_PARAM_NO_LEVEL)
+ if (params->level == ZCOMP_PARAM_NOT_SET)
params->level = LZ4_ACCELERATION_DEFAULT;
return 0;
diff --git a/drivers/block/zram/backend_lz4hc.c b/drivers/block/zram/backend_lz4hc.c
index 5f37d5abcaeb..9e8a35dfa56d 100644
--- a/drivers/block/zram/backend_lz4hc.c
+++ b/drivers/block/zram/backend_lz4hc.c
@@ -18,7 +18,7 @@ static void lz4hc_release_params(struct zcomp_params *params)
static int lz4hc_setup_params(struct zcomp_params *params)
{
- if (params->level == ZCOMP_PARAM_NO_LEVEL)
+ if (params->level == ZCOMP_PARAM_NOT_SET)
params->level = LZ4HC_DEFAULT_CLEVEL;
return 0;
diff --git a/drivers/block/zram/backend_zstd.c b/drivers/block/zram/backend_zstd.c
index 22c8067536f3..81defb98ed09 100644
--- a/drivers/block/zram/backend_zstd.c
+++ b/drivers/block/zram/backend_zstd.c
@@ -58,7 +58,7 @@ static int zstd_setup_params(struct zcomp_params *params)
return -ENOMEM;
params->drv_data = zp;
- if (params->level == ZCOMP_PARAM_NO_LEVEL)
+ if (params->level == ZCOMP_PARAM_NOT_SET)
params->level = zstd_default_clevel();
zp->cprm = zstd_get_params(params->level, PAGE_SIZE);
diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
index 25339ed1e07e..4acffe671a5e 100644
--- a/drivers/block/zram/zcomp.h
+++ b/drivers/block/zram/zcomp.h
@@ -5,7 +5,11 @@
#include <linux/mutex.h>
-#define ZCOMP_PARAM_NO_LEVEL INT_MIN
+#define ZCOMP_PARAM_NOT_SET INT_MIN
+
+struct deflate_params {
+ s32 winbits;
+};
/*
* Immutable driver (backend) parameters. The driver may attach private
@@ -17,6 +21,9 @@ struct zcomp_params {
void *dict;
size_t dict_sz;
s32 level;
+ union {
+ struct deflate_params deflate;
+ };
void *drv_data;
};
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index fda7d8624889..54c57103715f 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -734,114 +734,19 @@ static void read_from_bdev_async(struct zram *zram, struct page *page,
submit_bio(bio);
}
-#define PAGE_WB_SIG "page_index="
-
-#define PAGE_WRITEBACK 0
-#define HUGE_WRITEBACK (1<<0)
-#define IDLE_WRITEBACK (1<<1)
-#define INCOMPRESSIBLE_WRITEBACK (1<<2)
-
-static int scan_slots_for_writeback(struct zram *zram, u32 mode,
- unsigned long nr_pages,
- unsigned long index,
- struct zram_pp_ctl *ctl)
-{
- for (; nr_pages != 0; index++, nr_pages--) {
- bool ok = true;
-
- zram_slot_lock(zram, index);
- if (!zram_allocated(zram, index))
- goto next;
-
- if (zram_test_flag(zram, index, ZRAM_WB) ||
- zram_test_flag(zram, index, ZRAM_SAME))
- goto next;
-
- if (mode & IDLE_WRITEBACK &&
- !zram_test_flag(zram, index, ZRAM_IDLE))
- goto next;
- if (mode & HUGE_WRITEBACK &&
- !zram_test_flag(zram, index, ZRAM_HUGE))
- goto next;
- if (mode & INCOMPRESSIBLE_WRITEBACK &&
- !zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
- goto next;
-
- ok = place_pp_slot(zram, ctl, index);
-next:
- zram_slot_unlock(zram, index);
- if (!ok)
- break;
- }
-
- return 0;
-}
-
-static ssize_t writeback_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+static int zram_writeback_slots(struct zram *zram, struct zram_pp_ctl *ctl)
{
- struct zram *zram = dev_to_zram(dev);
- unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
- struct zram_pp_ctl *ctl = NULL;
+ unsigned long blk_idx = 0;
+ struct page *page = NULL;
struct zram_pp_slot *pps;
- unsigned long index = 0;
- struct bio bio;
struct bio_vec bio_vec;
- struct page *page = NULL;
- ssize_t ret = len;
- int mode, err;
- unsigned long blk_idx = 0;
-
- if (sysfs_streq(buf, "idle"))
- mode = IDLE_WRITEBACK;
- else if (sysfs_streq(buf, "huge"))
- mode = HUGE_WRITEBACK;
- else if (sysfs_streq(buf, "huge_idle"))
- mode = IDLE_WRITEBACK | HUGE_WRITEBACK;
- else if (sysfs_streq(buf, "incompressible"))
- mode = INCOMPRESSIBLE_WRITEBACK;
- else {
- if (strncmp(buf, PAGE_WB_SIG, sizeof(PAGE_WB_SIG) - 1))
- return -EINVAL;
-
- if (kstrtol(buf + sizeof(PAGE_WB_SIG) - 1, 10, &index) ||
- index >= nr_pages)
- return -EINVAL;
-
- nr_pages = 1;
- mode = PAGE_WRITEBACK;
- }
-
- down_read(&zram->init_lock);
- if (!init_done(zram)) {
- ret = -EINVAL;
- goto release_init_lock;
- }
-
- /* Do not permit concurrent post-processing actions. */
- if (atomic_xchg(&zram->pp_in_progress, 1)) {
- up_read(&zram->init_lock);
- return -EAGAIN;
- }
-
- if (!zram->backing_dev) {
- ret = -ENODEV;
- goto release_init_lock;
- }
+ struct bio bio;
+ int ret = 0, err;
+ u32 index;
page = alloc_page(GFP_KERNEL);
- if (!page) {
- ret = -ENOMEM;
- goto release_init_lock;
- }
-
- ctl = init_pp_ctl();
- if (!ctl) {
- ret = -ENOMEM;
- goto release_init_lock;
- }
-
- scan_slots_for_writeback(zram, mode, nr_pages, index, ctl);
+ if (!page)
+ return -ENOMEM;
while ((pps = select_pp_slot(ctl))) {
spin_lock(&zram->wb_limit_lock);
@@ -929,10 +834,215 @@ next:
if (blk_idx)
free_block_bdev(zram, blk_idx);
-
-release_init_lock:
if (page)
__free_page(page);
+
+ return ret;
+}
+
+#define PAGE_WRITEBACK 0
+#define HUGE_WRITEBACK (1 << 0)
+#define IDLE_WRITEBACK (1 << 1)
+#define INCOMPRESSIBLE_WRITEBACK (1 << 2)
+
+static int parse_page_index(char *val, unsigned long nr_pages,
+ unsigned long *lo, unsigned long *hi)
+{
+ int ret;
+
+ ret = kstrtoul(val, 10, lo);
+ if (ret)
+ return ret;
+ if (*lo >= nr_pages)
+ return -ERANGE;
+ *hi = *lo + 1;
+ return 0;
+}
+
+static int parse_page_indexes(char *val, unsigned long nr_pages,
+ unsigned long *lo, unsigned long *hi)
+{
+ char *delim;
+ int ret;
+
+ delim = strchr(val, '-');
+ if (!delim)
+ return -EINVAL;
+
+ *delim = 0x00;
+ ret = kstrtoul(val, 10, lo);
+ if (ret)
+ return ret;
+ if (*lo >= nr_pages)
+ return -ERANGE;
+
+ ret = kstrtoul(delim + 1, 10, hi);
+ if (ret)
+ return ret;
+ if (*hi >= nr_pages || *lo > *hi)
+ return -ERANGE;
+ *hi += 1;
+ return 0;
+}
+
+static int parse_mode(char *val, u32 *mode)
+{
+ *mode = 0;
+
+ if (!strcmp(val, "idle"))
+ *mode = IDLE_WRITEBACK;
+ if (!strcmp(val, "huge"))
+ *mode = HUGE_WRITEBACK;
+ if (!strcmp(val, "huge_idle"))
+ *mode = IDLE_WRITEBACK | HUGE_WRITEBACK;
+ if (!strcmp(val, "incompressible"))
+ *mode = INCOMPRESSIBLE_WRITEBACK;
+
+ if (*mode == 0)
+ return -EINVAL;
+ return 0;
+}
+
+static int scan_slots_for_writeback(struct zram *zram, u32 mode,
+ unsigned long lo, unsigned long hi,
+ struct zram_pp_ctl *ctl)
+{
+ u32 index = lo;
+
+ while (index < hi) {
+ bool ok = true;
+
+ zram_slot_lock(zram, index);
+ if (!zram_allocated(zram, index))
+ goto next;
+
+ if (zram_test_flag(zram, index, ZRAM_WB) ||
+ zram_test_flag(zram, index, ZRAM_SAME))
+ goto next;
+
+ if (mode & IDLE_WRITEBACK &&
+ !zram_test_flag(zram, index, ZRAM_IDLE))
+ goto next;
+ if (mode & HUGE_WRITEBACK &&
+ !zram_test_flag(zram, index, ZRAM_HUGE))
+ goto next;
+ if (mode & INCOMPRESSIBLE_WRITEBACK &&
+ !zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
+ goto next;
+
+ ok = place_pp_slot(zram, ctl, index);
+next:
+ zram_slot_unlock(zram, index);
+ if (!ok)
+ break;
+ index++;
+ }
+
+ return 0;
+}
+
+static ssize_t writeback_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct zram *zram = dev_to_zram(dev);
+ u64 nr_pages = zram->disksize >> PAGE_SHIFT;
+ unsigned long lo = 0, hi = nr_pages;
+ struct zram_pp_ctl *ctl = NULL;
+ char *args, *param, *val;
+ ssize_t ret = len;
+ u32 mode = 0;
+ int err;
+
+ down_read(&zram->init_lock);
+ if (!init_done(zram)) {
+ up_read(&zram->init_lock);
+ return -EINVAL;
+ }
+
+ /* Do not permit concurrent post-processing actions. */
+ if (atomic_xchg(&zram->pp_in_progress, 1)) {
+ up_read(&zram->init_lock);
+ return -EAGAIN;
+ }
+
+ if (!zram->backing_dev) {
+ ret = -ENODEV;
+ goto release_init_lock;
+ }
+
+ ctl = init_pp_ctl();
+ if (!ctl) {
+ ret = -ENOMEM;
+ goto release_init_lock;
+ }
+
+ args = skip_spaces(buf);
+ while (*args) {
+ args = next_arg(args, &param, &val);
+
+ /*
+ * Workaround to support the old writeback interface.
+ *
+ * The old writeback interface had a minor inconsistency: only the
+ * page_index parameter was key=value, while the writeback mode was
+ * a valueless parameter.
+ *
+ * That is no longer the case: all parameters are now required to
+ * have values. However, we still need to support the legacy format,
+ * so we check whether a valueless parameter can be recognized as
+ * the (legacy) writeback mode.
+ */
+ if (!val || !*val) {
+ err = parse_mode(param, &mode);
+ if (err) {
+ ret = err;
+ goto release_init_lock;
+ }
+
+ scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ break;
+ }
+
+ if (!strcmp(param, "type")) {
+ err = parse_mode(val, &mode);
+ if (err) {
+ ret = err;
+ goto release_init_lock;
+ }
+
+ scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ break;
+ }
+
+ if (!strcmp(param, "page_index")) {
+ err = parse_page_index(val, nr_pages, &lo, &hi);
+ if (err) {
+ ret = err;
+ goto release_init_lock;
+ }
+
+ scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ continue;
+ }
+
+ if (!strcmp(param, "page_indexes")) {
+ err = parse_page_indexes(val, nr_pages, &lo, &hi);
+ if (err) {
+ ret = err;
+ goto release_init_lock;
+ }
+
+ scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ continue;
+ }
+ }
+
+ err = zram_writeback_slots(zram, ctl);
+ if (err)
+ ret = err;
+
+release_init_lock:
release_pp_ctl(zram, ctl);
atomic_set(&zram->pp_in_progress, 0);
up_read(&zram->init_lock);
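With the new parser, a writeback request is a list of key=value parameters: type= selects one of idle, huge, huge_idle or incompressible; page_index=N queues a single page; and page_indexes=LO-HI queues an inclusive range. The index parameters may be repeated to queue several ranges in one write, while the bare idle/huge/huge_idle/incompressible strings keep working through the valueless-parameter fallback above.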
@@ -1166,13 +1276,15 @@ static void comp_params_reset(struct zram *zram, u32 prio)
struct zcomp_params *params = &zram->params[prio];
vfree(params->dict);
- params->level = ZCOMP_PARAM_NO_LEVEL;
+ params->level = ZCOMP_PARAM_NOT_SET;
+ params->deflate.winbits = ZCOMP_PARAM_NOT_SET;
params->dict_sz = 0;
params->dict = NULL;
}
static int comp_params_store(struct zram *zram, u32 prio, s32 level,
- const char *dict_path)
+ const char *dict_path,
+ struct deflate_params *deflate_params)
{
ssize_t sz = 0;
@@ -1190,6 +1302,7 @@ static int comp_params_store(struct zram *zram, u32 prio, s32 level,
zram->params[prio].dict_sz = sz;
zram->params[prio].level = level;
+ zram->params[prio].deflate.winbits = deflate_params->winbits;
return 0;
}
@@ -1198,11 +1311,14 @@ static ssize_t algorithm_params_store(struct device *dev,
const char *buf,
size_t len)
{
- s32 prio = ZRAM_PRIMARY_COMP, level = ZCOMP_PARAM_NO_LEVEL;
+ s32 prio = ZRAM_PRIMARY_COMP, level = ZCOMP_PARAM_NOT_SET;
char *args, *param, *val, *algo = NULL, *dict_path = NULL;
+ struct deflate_params deflate_params;
struct zram *zram = dev_to_zram(dev);
int ret;
+ deflate_params.winbits = ZCOMP_PARAM_NOT_SET;
+
args = skip_spaces(buf);
while (*args) {
args = next_arg(args, &param, &val);
@@ -1233,6 +1349,13 @@ static ssize_t algorithm_params_store(struct device *dev,
dict_path = val;
continue;
}
+
+ if (!strcmp(param, "deflate.winbits")) {
+ ret = kstrtoint(val, 10, &deflate_params.winbits);
+ if (ret)
+ return ret;
+ continue;
+ }
}
/* Lookup priority by algorithm name */
@@ -1254,7 +1377,7 @@ static ssize_t algorithm_params_store(struct device *dev,
if (prio < ZRAM_PRIMARY_COMP || prio >= ZRAM_MAX_COMPS)
return -EINVAL;
- ret = comp_params_store(zram, prio, level, dict_path);
+ ret = comp_params_store(zram, prio, level, dict_path, &deflate_params);
return ret ? ret : len;
}
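The new knob rides the existing algorithm_params interface: for example, writing algo=deflate deflate.winbits=-12 (alongside the priority, level and dict parameters handled earlier in this function) overrides the default, while leaving deflate.winbits unset keeps it at ZCOMP_PARAM_NOT_SET so that deflate_setup_params() falls back to DEFLATE_DEF_WINBITS.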
@@ -1694,7 +1817,7 @@ static int write_incompressible_page(struct zram *zram, struct page *page,
*/
handle = zs_malloc(zram->mem_pool, PAGE_SIZE,
GFP_NOIO | __GFP_NOWARN |
- __GFP_HIGHMEM | __GFP_MOVABLE);
+ __GFP_HIGHMEM | __GFP_MOVABLE, page_to_nid(page));
if (IS_ERR_VALUE(handle))
return PTR_ERR((void *)handle);
@@ -1761,7 +1884,7 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
handle = zs_malloc(zram->mem_pool, comp_len,
GFP_NOIO | __GFP_NOWARN |
- __GFP_HIGHMEM | __GFP_MOVABLE);
+ __GFP_HIGHMEM | __GFP_MOVABLE, page_to_nid(page));
if (IS_ERR_VALUE(handle)) {
zcomp_stream_put(zstrm);
return PTR_ERR((void *)handle);
@@ -1981,10 +2104,15 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page,
* We are holding per-CPU stream mutex and entry lock so better
* avoid direct reclaim. Allocation error is not fatal since
* we still have the old object in the mem_pool.
+ *
+ * XXX: technically, the node we really want here is the node that holds
+ * the original compressed data. But that would require us to modify the
+ * zsmalloc API to return this information. For now, we will make do with
+ * the node of the page allocated for recompression.
*/
handle_new = zs_malloc(zram->mem_pool, comp_len_new,
GFP_NOIO | __GFP_NOWARN |
- __GFP_HIGHMEM | __GFP_MOVABLE);
+ __GFP_HIGHMEM | __GFP_MOVABLE, page_to_nid(page));
if (IS_ERR_VALUE(handle_new)) {
zcomp_stream_put(zstrm);
return PTR_ERR((void *)handle_new);
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index ee29162da4ee..91ef99c42344 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -395,10 +395,7 @@ static struct attribute *gisb_arb_sysfs_attrs[] = {
&dev_attr_gisb_arb_timeout.attr,
NULL,
};
-
-static struct attribute_group gisb_arb_sysfs_attr_group = {
- .attrs = gisb_arb_sysfs_attrs,
-};
+ATTRIBUTE_GROUPS(gisb_arb_sysfs);
static const struct of_device_id brcmstb_gisb_arb_of_match[] = {
{ .compatible = "brcm,gisb-arb", .data = gisb_offsets_bcm7445 },
@@ -490,10 +487,6 @@ static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev)
}
}
- err = sysfs_create_group(&pdev->dev.kobj, &gisb_arb_sysfs_attr_group);
- if (err)
- return err;
-
platform_set_drvdata(pdev, gdev);
list_add_tail(&gdev->next, &brcmstb_gisb_arb_device_list);
@@ -550,6 +543,7 @@ static struct platform_driver brcmstb_gisb_arb_driver = {
.name = "brcm-gisb-arb",
.of_match_table = brcmstb_gisb_arb_of_match,
.pm = &brcmstb_gisb_arb_pm_ops,
+ .dev_groups = gisb_arb_sysfs_groups,
},
};
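ATTRIBUTE_GROUPS() generates both the attribute_group and the NULL-terminated array that .dev_groups expects, and the driver core then creates and removes the sysfs group around probe/remove, which is what makes the manual sysfs_create_group() call (and its missing cleanup path) removable. Roughly, the macro expands to:

    /* Approximate expansion of ATTRIBUTE_GROUPS(gisb_arb_sysfs). */
    static const struct attribute_group gisb_arb_sysfs_group = {
            .attrs = gisb_arb_sysfs_attrs,
    };
    static const struct attribute_group *gisb_arb_sysfs_groups[] = {
            &gisb_arb_sysfs_group,
            NULL,
    };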
diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c
index 52053f7c6d9a..c63a7e688db6 100644
--- a/drivers/bus/fsl-mc/dprc-driver.c
+++ b/drivers/bus/fsl-mc/dprc-driver.c
@@ -806,8 +806,6 @@ int dprc_cleanup(struct fsl_mc_device *mc_dev)
dev_set_msi_domain(&mc_dev->dev, NULL);
}
- fsl_mc_cleanup_all_resource_pools(mc_dev);
-
/* if this step fails we cannot go further with cleanup as there is no way of
* communicating with the firmware
*/
diff --git a/drivers/bus/fsl-mc/dprc.c b/drivers/bus/fsl-mc/dprc.c
index dd1b5c0fb7e2..38d40c09b719 100644
--- a/drivers/bus/fsl-mc/dprc.c
+++ b/drivers/bus/fsl-mc/dprc.c
@@ -489,7 +489,7 @@ int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
cmd_params->obj_id = cpu_to_le32(obj_id);
- strscpy_pad(cmd_params->obj_type, obj_type, 16);
+ strscpy(cmd_params->obj_type, obj_type);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -561,7 +561,7 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io,
cmd_params = (struct dprc_cmd_get_obj_region *)cmd.params;
cmd_params->obj_id = cpu_to_le32(obj_id);
cmd_params->region_index = region_index;
- strscpy_pad(cmd_params->obj_type, obj_type, 16);
+ strscpy(cmd_params->obj_type, obj_type);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
diff --git a/drivers/bus/fsl-mc/fsl-mc-allocator.c b/drivers/bus/fsl-mc/fsl-mc-allocator.c
index 6c3beb82dd1b..d2ea59471323 100644
--- a/drivers/bus/fsl-mc/fsl-mc-allocator.c
+++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c
@@ -555,27 +555,6 @@ void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
}
}
-static void fsl_mc_cleanup_resource_pool(struct fsl_mc_device *mc_bus_dev,
- enum fsl_mc_pool_type pool_type)
-{
- struct fsl_mc_resource *resource;
- struct fsl_mc_resource *next;
- struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
- struct fsl_mc_resource_pool *res_pool =
- &mc_bus->resource_pools[pool_type];
-
- list_for_each_entry_safe(resource, next, &res_pool->free_list, node)
- devm_kfree(&mc_bus_dev->dev, resource);
-}
-
-void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
-{
- int pool_type;
-
- for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++)
- fsl_mc_cleanup_resource_pool(mc_bus_dev, pool_type);
-}
-
/*
* fsl_mc_allocator_probe - callback invoked when an allocatable device is
* being added to the system
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index a8be8cf246fb..7671bd158545 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -139,9 +139,9 @@ static int fsl_mc_bus_uevent(const struct device *dev, struct kobj_uevent_env *e
static int fsl_mc_dma_configure(struct device *dev)
{
+ const struct device_driver *drv = READ_ONCE(dev->driver);
struct device *dma_dev = dev;
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
- struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
u32 input_id = mc_dev->icid;
int ret;
@@ -153,8 +153,8 @@ static int fsl_mc_dma_configure(struct device *dev)
else
ret = acpi_dma_configure_id(dev, DEV_DMA_COHERENT, &input_id);
- /* @mc_drv may not be valid when we're called from the IOMMU layer */
- if (!ret && dev->driver && !mc_drv->driver_managed_dma) {
+ /* @drv may not be valid when we're called from the IOMMU layer */
+ if (!ret && drv && !to_fsl_mc_driver(drv)->driver_managed_dma) {
ret = iommu_device_use_default_domain(dev);
if (ret)
arch_teardown_dma_ops(dev);
@@ -906,8 +906,10 @@ int fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
error_cleanup_dev:
kfree(mc_dev->regions);
- kfree(mc_bus);
- kfree(mc_dev);
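+	/* for a DPRC object, @mc_dev is embedded in @mc_bus */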
+ if (mc_bus)
+ kfree(mc_bus);
+ else
+ kfree(mc_dev);
return error;
}
diff --git a/drivers/bus/fsl-mc/fsl-mc-private.h b/drivers/bus/fsl-mc/fsl-mc-private.h
index e1b7ec3ed1a7..beed4c53533d 100644
--- a/drivers/bus/fsl-mc/fsl-mc-private.h
+++ b/drivers/bus/fsl-mc/fsl-mc-private.h
@@ -629,8 +629,6 @@ int __init fsl_mc_allocator_driver_init(void);
void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
-void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
-
int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
enum fsl_mc_pool_type pool_type,
struct fsl_mc_resource
diff --git a/drivers/bus/fsl-mc/fsl-mc-uapi.c b/drivers/bus/fsl-mc/fsl-mc-uapi.c
index 9c4c1395fcdb..823969e4159c 100644
--- a/drivers/bus/fsl-mc/fsl-mc-uapi.c
+++ b/drivers/bus/fsl-mc/fsl-mc-uapi.c
@@ -48,6 +48,7 @@ enum fsl_mc_cmd_index {
DPRC_GET_POOL,
DPRC_GET_POOL_COUNT,
DPRC_GET_CONNECTION,
+ DPRC_GET_MEM,
DPCI_GET_LINK_STATE,
DPCI_GET_PEER_ATTR,
DPAIOP_GET_SL_VERSION,
@@ -194,6 +195,12 @@ static struct fsl_mc_cmd_desc fsl_mc_accepted_cmds[] = {
.token = true,
.size = 32,
},
+ [DPRC_GET_MEM] = {
+ .cmdid_value = 0x16D0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 12,
+ },
[DPCI_GET_LINK_STATE] = {
.cmdid_value = 0x0E10,
@@ -275,13 +282,13 @@ static struct fsl_mc_cmd_desc fsl_mc_accepted_cmds[] = {
.size = 8,
},
[DPSW_GET_TAILDROP] = {
- .cmdid_value = 0x0A80,
+ .cmdid_value = 0x0A90,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 14,
},
[DPSW_SET_TAILDROP] = {
- .cmdid_value = 0x0A90,
+ .cmdid_value = 0x0A80,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 24,
diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c
index a0ad7866cbfc..cd8754763f40 100644
--- a/drivers/bus/fsl-mc/mc-io.c
+++ b/drivers/bus/fsl-mc/mc-io.c
@@ -214,12 +214,19 @@ int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
if (error < 0)
goto error_cleanup_resource;
- dpmcp_dev->consumer_link = device_link_add(&mc_dev->dev,
- &dpmcp_dev->dev,
- DL_FLAG_AUTOREMOVE_CONSUMER);
- if (!dpmcp_dev->consumer_link) {
- error = -EINVAL;
- goto error_cleanup_mc_io;
+ /* If the DPRC device itself tries to allocate a portal (usually for
+ * UAPI interaction), don't add a device link between them since the
+ * DPMCP device is an actual child device of the DPRC and a reverse
+ * dependency is not allowed.
+ */
+ if (mc_dev != mc_bus_dev) {
+ dpmcp_dev->consumer_link = device_link_add(&mc_dev->dev,
+ &dpmcp_dev->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
+ if (!dpmcp_dev->consumer_link) {
+ error = -EINVAL;
+ goto error_cleanup_mc_io;
+ }
}
*new_mc_io = mc_io;
diff --git a/drivers/bus/fsl-mc/mc-sys.c b/drivers/bus/fsl-mc/mc-sys.c
index f2052cd0a051..b22c59d57c8f 100644
--- a/drivers/bus/fsl-mc/mc-sys.c
+++ b/drivers/bus/fsl-mc/mc-sys.c
@@ -19,7 +19,7 @@
/*
* Timeout in milliseconds to wait for the completion of an MC command
*/
-#define MC_CMD_COMPLETION_TIMEOUT_MS 500
+#define MC_CMD_COMPLETION_TIMEOUT_MS 15000
/*
* usleep_range() min and max values used to throttle down polling
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index f67b927ae4ca..9f624e5da991 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -677,51 +677,6 @@ static int sysc_parse_and_check_child_range(struct sysc *ddata)
return 0;
}
-/* Interconnect instances to probe before l4_per instances */
-static struct resource early_bus_ranges[] = {
- /* am3/4 l4_wkup */
- { .start = 0x44c00000, .end = 0x44c00000 + 0x300000, },
- /* omap4/5 and dra7 l4_cfg */
- { .start = 0x4a000000, .end = 0x4a000000 + 0x300000, },
- /* omap4 l4_wkup */
- { .start = 0x4a300000, .end = 0x4a300000 + 0x30000, },
- /* omap5 and dra7 l4_wkup without dra7 dcan segment */
- { .start = 0x4ae00000, .end = 0x4ae00000 + 0x30000, },
-};
-
-static atomic_t sysc_defer = ATOMIC_INIT(10);
-
-/**
- * sysc_defer_non_critical - defer non_critical interconnect probing
- * @ddata: device driver data
- *
- * We want to probe l4_cfg and l4_wkup interconnect instances before any
- * l4_per instances as l4_per instances depend on resources on l4_cfg and
- * l4_wkup interconnects.
- */
-static int sysc_defer_non_critical(struct sysc *ddata)
-{
- struct resource *res;
- int i;
-
- if (!atomic_read(&sysc_defer))
- return 0;
-
- for (i = 0; i < ARRAY_SIZE(early_bus_ranges); i++) {
- res = &early_bus_ranges[i];
- if (ddata->module_pa >= res->start &&
- ddata->module_pa <= res->end) {
- atomic_set(&sysc_defer, 0);
-
- return 0;
- }
- }
-
- atomic_dec_if_positive(&sysc_defer);
-
- return -EPROBE_DEFER;
-}
-
static struct device_node *stdout_path;
static void sysc_init_stdout_path(struct sysc *ddata)
@@ -947,10 +902,6 @@ static int sysc_map_and_check_registers(struct sysc *ddata)
if (error)
return error;
- error = sysc_defer_non_critical(ddata);
- if (error)
- return error;
-
sysc_check_children(ddata);
if (!of_property_present(np, "reg"))
@@ -2036,6 +1987,21 @@ static void sysc_module_disable_quirk_pruss(struct sysc *ddata)
sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
}
+static void sysc_module_enable_quirk_pruss(struct sysc *ddata)
+{
+ u32 reg;
+
+ reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
+ /*
+	 * Clearing the SYSC_PRUSS_STANDBY_INIT bit updates the OCP master
+ * port configuration to enable memory access outside of the
+ * PRU-ICSS subsystem.
+ */
+ reg &= (~SYSC_PRUSS_STANDBY_INIT);
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
+}
+
static void sysc_init_module_quirks(struct sysc *ddata)
{
if (ddata->legacy_mode || !ddata->name)
@@ -2088,8 +2054,10 @@ static void sysc_init_module_quirks(struct sysc *ddata)
ddata->module_disable_quirk = sysc_reset_done_quirk_wdt;
}
- if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_PRUSS)
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_PRUSS) {
+ ddata->module_enable_quirk = sysc_module_enable_quirk_pruss;
ddata->module_disable_quirk = sysc_module_disable_quirk_pruss;
+ }
}
static int sysc_clockdomain_init(struct sysc *ddata)
diff --git a/drivers/cache/sifive_ccache.c b/drivers/cache/sifive_ccache.c
index 6874b72ec59d..e1a283805ea7 100644
--- a/drivers/cache/sifive_ccache.c
+++ b/drivers/cache/sifive_ccache.c
@@ -118,6 +118,8 @@ static void ccache_config_read(void)
}
static const struct of_device_id sifive_ccache_ids[] = {
+ { .compatible = "eswin,eic7700-l3-cache",
+ .data = (void *)(QUIRK_NONSTANDARD_CACHE_OPS) },
{ .compatible = "sifive,fu540-c000-ccache" },
{ .compatible = "sifive,fu740-c000-ccache" },
{ .compatible = "starfive,jh7100-ccache",
diff --git a/drivers/clocksource/timer-stm32-lp.c b/drivers/clocksource/timer-stm32-lp.c
index 928da2f6de69..6e7944ffd7c0 100644
--- a/drivers/clocksource/timer-stm32-lp.c
+++ b/drivers/clocksource/timer-stm32-lp.c
@@ -5,6 +5,7 @@
* Pascal Paillet <p.paillet@st.com> for STMicroelectronics.
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
@@ -27,6 +28,7 @@ struct stm32_lp_private {
u32 psc;
struct device *dev;
struct clk *clk;
+ u32 version;
};
static struct stm32_lp_private*
@@ -47,12 +49,46 @@ static int stm32_clkevent_lp_shutdown(struct clock_event_device *clkevt)
return 0;
}
-static int stm32_clkevent_lp_set_timer(unsigned long evt,
- struct clock_event_device *clkevt,
- int is_periodic)
+static int stm32mp25_clkevent_lp_set_evt(struct stm32_lp_private *priv, unsigned long evt)
{
- struct stm32_lp_private *priv = to_priv(clkevt);
+ int ret;
+ u32 val;
+
+ regmap_read(priv->reg, STM32_LPTIM_CR, &val);
+ if (!FIELD_GET(STM32_LPTIM_ENABLE, val)) {
+ /* Enable LPTIMER to be able to write into IER and ARR registers */
+ regmap_write(priv->reg, STM32_LPTIM_CR, STM32_LPTIM_ENABLE);
+ /*
+ * After setting the ENABLE bit, a delay of two counter clock cycles is needed
+		 * before the LPTIM is actually enabled. At a 32 kHz rate this is approximately
+		 * 62.5 microseconds; round it up.
+ */
+ udelay(63);
+ }
+ /* set next event counter */
+ regmap_write(priv->reg, STM32_LPTIM_ARR, evt);
+ /* enable ARR interrupt */
+ regmap_write(priv->reg, STM32_LPTIM_IER, STM32_LPTIM_ARRMIE);
+
+ /* Poll DIEROK and ARROK to ensure register access has completed */
+ ret = regmap_read_poll_timeout_atomic(priv->reg, STM32_LPTIM_ISR, val,
+ (val & STM32_LPTIM_DIEROK_ARROK) ==
+ STM32_LPTIM_DIEROK_ARROK,
+ 10, 500);
+ if (ret) {
+ dev_err(priv->dev, "access to LPTIM timed out\n");
+ /* Disable LPTIMER */
+ regmap_write(priv->reg, STM32_LPTIM_CR, 0);
+ return ret;
+ }
+ /* Clear DIEROK and ARROK flags */
+ regmap_write(priv->reg, STM32_LPTIM_ICR, STM32_LPTIM_DIEROKCF_ARROKCF);
+ return 0;
+}
+
+static void stm32_clkevent_lp_set_evt(struct stm32_lp_private *priv, unsigned long evt)
+{
/* disable LPTIMER to be able to write into IER register*/
regmap_write(priv->reg, STM32_LPTIM_CR, 0);
/* enable ARR interrupt */
@@ -61,6 +97,22 @@ static int stm32_clkevent_lp_set_timer(unsigned long evt,
regmap_write(priv->reg, STM32_LPTIM_CR, STM32_LPTIM_ENABLE);
/* set next event counter */
regmap_write(priv->reg, STM32_LPTIM_ARR, evt);
+}
+
+static int stm32_clkevent_lp_set_timer(unsigned long evt,
+ struct clock_event_device *clkevt,
+ int is_periodic)
+{
+ struct stm32_lp_private *priv = to_priv(clkevt);
+ int ret;
+
+ if (priv->version == STM32_LPTIM_VERR_23) {
+ ret = stm32mp25_clkevent_lp_set_evt(priv, evt);
+ if (ret)
+ return ret;
+ } else {
+ stm32_clkevent_lp_set_evt(priv, evt);
+ }
/* start counter */
if (is_periodic)
@@ -176,6 +228,7 @@ static int stm32_clkevent_lp_probe(struct platform_device *pdev)
return -ENOMEM;
priv->reg = ddata->regmap;
+ priv->version = ddata->version;
priv->clk = ddata->clk;
ret = clk_prepare_enable(priv->clk);
if (ret)
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index d64b07ec48e5..78702a08364f 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -217,6 +217,18 @@ config CPUFREQ_DT
If in doubt, say N.
+config CPUFREQ_DT_RUST
+	tristate "Rust based generic DT cpufreq driver"
+ depends on HAVE_CLK && OF && RUST
+ select CPUFREQ_DT_PLATDEV
+ select PM_OPP
+ help
+	  This adds a Rust based generic DT cpufreq driver for frequency
+ management. It supports both uniprocessor (UP) and symmetric
+ multiprocessor (SMP) systems.
+
+ If in doubt, say N.
+
config CPUFREQ_VIRT
tristate "Virtual cpufreq driver"
depends on GENERIC_ARCH_TOPOLOGY
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 22ab45209f9b..d38526b8e063 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o
obj-$(CONFIG_CPU_FREQ_GOV_ATTR_SET) += cpufreq_governor_attr_set.o
obj-$(CONFIG_CPUFREQ_DT) += cpufreq-dt.o
+obj-$(CONFIG_CPUFREQ_DT_RUST) += rcpufreq_dt.o
obj-$(CONFIG_CPUFREQ_DT_PLATDEV) += cpufreq-dt-platdev.o
obj-$(CONFIG_CPUFREQ_VIRT) += virtual-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index ea4b8f220a05..4f7f9201598d 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -660,7 +660,7 @@ static u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq)
nominal_perf = perf_caps.nominal_perf;
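+	/* CPPC nominal_freq is reported in MHz; cpufreq expects kHz */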
if (nominal_freq)
- *nominal_freq = perf_caps.nominal_freq;
+ *nominal_freq = perf_caps.nominal_freq * 1000;
if (!highest_perf || !nominal_perf) {
pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 4e3ba6e68c32..f7512b4e923e 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -482,7 +482,7 @@ static void check_supported_cpu(void *_rc)
cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
if ((edx & P_STATE_TRANSITION_CAPABLE)
!= P_STATE_TRANSITION_CAPABLE) {
- pr_info("Power state transitions not supported\n");
+ pr_info_once("Power state transitions not supported\n");
return;
}
*rc = 0;
diff --git a/drivers/cpufreq/rcpufreq_dt.rs b/drivers/cpufreq/rcpufreq_dt.rs
new file mode 100644
index 000000000000..94ed81644fe1
--- /dev/null
+++ b/drivers/cpufreq/rcpufreq_dt.rs
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust based implementation of the cpufreq-dt driver.
+
+use kernel::{
+ c_str,
+ clk::Clk,
+ cpu, cpufreq,
+ cpumask::CpumaskVar,
+ device::{Core, Device},
+ error::code::*,
+ fmt,
+ macros::vtable,
+ module_platform_driver, of, opp, platform,
+ prelude::*,
+ str::CString,
+ sync::Arc,
+};
+
+/// Finds exact supply name from the OF node.
+fn find_supply_name_exact(dev: &Device, name: &str) -> Option<CString> {
+ let prop_name = CString::try_from_fmt(fmt!("{}-supply", name)).ok()?;
+ dev.property_present(&prop_name)
+ .then(|| CString::try_from_fmt(fmt!("{name}")).ok())
+ .flatten()
+}
+
+/// Finds supply name for the CPU from DT.
+fn find_supply_names(dev: &Device, cpu: u32) -> Option<KVec<CString>> {
+ // Try "cpu0" for older DTs, fallback to "cpu".
+ let name = (cpu == 0)
+ .then(|| find_supply_name_exact(dev, "cpu0"))
+ .flatten()
+ .or_else(|| find_supply_name_exact(dev, "cpu"))?;
+
+ let mut list = KVec::with_capacity(1, GFP_KERNEL).ok()?;
+ list.push(name, GFP_KERNEL).ok()?;
+
+ Some(list)
+}
+
+/// Represents the cpufreq dt device.
+struct CPUFreqDTDevice {
+ opp_table: opp::Table,
+ freq_table: opp::FreqTable,
+ _mask: CpumaskVar,
+ _token: Option<opp::ConfigToken>,
+ _clk: Clk,
+}
+
+#[derive(Default)]
+struct CPUFreqDTDriver;
+
+#[vtable]
+impl opp::ConfigOps for CPUFreqDTDriver {}
+
+#[vtable]
+impl cpufreq::Driver for CPUFreqDTDriver {
+ const NAME: &'static CStr = c_str!("cpufreq-dt");
+ const FLAGS: u16 = cpufreq::flags::NEED_INITIAL_FREQ_CHECK | cpufreq::flags::IS_COOLING_DEV;
+ const BOOST_ENABLED: bool = true;
+
+ type PData = Arc<CPUFreqDTDevice>;
+
+ fn init(policy: &mut cpufreq::Policy) -> Result<Self::PData> {
+ let cpu = policy.cpu();
+ // SAFETY: The CPU device is only used during init; it won't get hot-unplugged. The cpufreq
+        // core registers with CPU notifiers and the cpufreq core/driver won't use the CPU
+        // device once the CPU is hot-unplugged.
+ let dev = unsafe { cpu::from_cpu(cpu)? };
+ let mut mask = CpumaskVar::new_zero(GFP_KERNEL)?;
+
+ mask.set(cpu);
+
+ let token = find_supply_names(dev, cpu)
+ .map(|names| {
+ opp::Config::<Self>::new()
+ .set_regulator_names(names)?
+ .set(dev)
+ })
+ .transpose()?;
+
+ // Get OPP-sharing information from "operating-points-v2" bindings.
+ let fallback = match opp::Table::of_sharing_cpus(dev, &mut mask) {
+ Ok(()) => false,
+ Err(e) if e == ENOENT => {
+ // "operating-points-v2" not supported. If the platform hasn't
+                // set sharing CPUs, fall back to all CPUs sharing the `Policy`
+ // for backward compatibility.
+ opp::Table::sharing_cpus(dev, &mut mask).is_err()
+ }
+ Err(e) => return Err(e),
+ };
+
+ // Initialize OPP tables for all policy cpus.
+ //
+ // For platforms not using "operating-points-v2" bindings, we do this
+ // before updating policy cpus. Otherwise, we will end up creating
+ // duplicate OPPs for the CPUs.
+ //
+ // OPPs might be populated at runtime, don't fail for error here unless
+ // it is -EPROBE_DEFER.
+ let mut opp_table = match opp::Table::from_of_cpumask(dev, &mut mask) {
+ Ok(table) => table,
+ Err(e) => {
+ if e == EPROBE_DEFER {
+ return Err(e);
+ }
+
+                // The table may have been added dynamically at runtime.
+ opp::Table::from_dev(dev)?
+ }
+ };
+
+ // The OPP table must be initialized, statically or dynamically, by this point.
+ opp_table.opp_count()?;
+
+ // Set sharing cpus for fallback scenario.
+ if fallback {
+ mask.setall();
+ opp_table.set_sharing_cpus(&mut mask)?;
+ }
+
+ let mut transition_latency = opp_table.max_transition_latency_ns() as u32;
+ if transition_latency == 0 {
+ transition_latency = cpufreq::ETERNAL_LATENCY_NS;
+ }
+
+ policy
+ .set_dvfs_possible_from_any_cpu(true)
+ .set_suspend_freq(opp_table.suspend_freq())
+ .set_transition_latency_ns(transition_latency);
+
+ let freq_table = opp_table.cpufreq_table()?;
+ // SAFETY: The `freq_table` is not dropped while it is getting used by the C code.
+ unsafe { policy.set_freq_table(&freq_table) };
+
+ // SAFETY: The returned `clk` is not dropped while it is getting used by the C code.
+ let clk = unsafe { policy.set_clk(dev, None)? };
+
+ mask.copy(policy.cpus());
+
+ Ok(Arc::new(
+ CPUFreqDTDevice {
+ opp_table,
+ freq_table,
+ _mask: mask,
+ _token: token,
+ _clk: clk,
+ },
+ GFP_KERNEL,
+ )?)
+ }
+
+ fn exit(_policy: &mut cpufreq::Policy, _data: Option<Self::PData>) -> Result {
+ Ok(())
+ }
+
+ fn online(_policy: &mut cpufreq::Policy) -> Result {
+ // We did light-weight tear down earlier, nothing to do here.
+ Ok(())
+ }
+
+ fn offline(_policy: &mut cpufreq::Policy) -> Result {
+ // Preserve policy->data and don't free resources on light-weight
+ // tear down.
+ Ok(())
+ }
+
+ fn suspend(policy: &mut cpufreq::Policy) -> Result {
+ policy.generic_suspend()
+ }
+
+ fn verify(data: &mut cpufreq::PolicyData) -> Result {
+ data.generic_verify()
+ }
+
+ fn target_index(policy: &mut cpufreq::Policy, index: cpufreq::TableIndex) -> Result {
+ let Some(data) = policy.data::<Self::PData>() else {
+ return Err(ENOENT);
+ };
+
+ let freq = data.freq_table.freq(index)?;
+ data.opp_table.set_rate(freq)
+ }
+
+ fn get(policy: &mut cpufreq::Policy) -> Result<u32> {
+ policy.generic_get()
+ }
+
+ fn set_boost(_policy: &mut cpufreq::Policy, _state: i32) -> Result {
+ Ok(())
+ }
+
+ fn register_em(policy: &mut cpufreq::Policy) {
+ policy.register_em_opp()
+ }
+}
+
+kernel::of_device_table!(
+ OF_TABLE,
+ MODULE_OF_TABLE,
+ <CPUFreqDTDriver as platform::Driver>::IdInfo,
+ [(of::DeviceId::new(c_str!("operating-points-v2")), ())]
+);
+
+impl platform::Driver for CPUFreqDTDriver {
+ type IdInfo = ();
+ const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = Some(&OF_TABLE);
+
+ fn probe(
+ pdev: &platform::Device<Core>,
+ _id_info: Option<&Self::IdInfo>,
+ ) -> Result<Pin<KBox<Self>>> {
+ cpufreq::Registration::<CPUFreqDTDriver>::new_foreign_owned(pdev.as_ref())?;
+ Ok(KBox::new(Self {}, GFP_KERNEL)?.into())
+ }
+}
+
+module_platform_driver! {
+ type: CPUFreqDTDriver,
+ name: "cpufreq-dt",
+ author: "Viresh Kumar <viresh.kumar@linaro.org>",
+ description: "Generic CPUFreq DT driver",
+ license: "GPL v2",
+}
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 944e899eb1be..ef078426bfd5 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -393,6 +393,40 @@ static struct cpufreq_driver scmi_cpufreq_driver = {
.set_boost = cpufreq_boost_set_sw,
};
+static bool scmi_dev_used_by_cpus(struct device *scmi_dev)
+{
+ struct device_node *scmi_np = dev_of_node(scmi_dev);
+ struct device_node *cpu_np, *np;
+ struct device *cpu_dev;
+ int cpu, idx;
+
+ if (!scmi_np)
+ return false;
+
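+	/*
+	 * A CPU consumes this SCMI instance either through its "clocks"
+	 * phandle or through a "perf" power-domain; check both bindings.
+	 */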
+ for_each_possible_cpu(cpu) {
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev)
+ continue;
+
+ cpu_np = dev_of_node(cpu_dev);
+
+ np = of_parse_phandle(cpu_np, "clocks", 0);
+ of_node_put(np);
+
+ if (np == scmi_np)
+ return true;
+
+ idx = of_property_match_string(cpu_np, "power-domain-names", "perf");
+ np = of_parse_phandle(cpu_np, "power-domains", idx);
+ of_node_put(np);
+
+ if (np == scmi_np)
+ return true;
+ }
+
+ return false;
+}
+
static int scmi_cpufreq_probe(struct scmi_device *sdev)
{
int ret;
@@ -401,7 +435,7 @@ static int scmi_cpufreq_probe(struct scmi_device *sdev)
handle = sdev->handle;
- if (!handle)
+ if (!handle || !scmi_dev_used_by_cpus(dev))
return -ENODEV;
scmi_cpufreq_driver.driver_data = sdev;
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index 3c2756a539c4..4e1ba35deda9 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -456,14 +456,13 @@ static struct faux_device_ops psci_cpuidle_ops = {
static bool __init dt_idle_state_present(void)
{
- struct device_node *cpu_node __free(device_node);
- struct device_node *state_node __free(device_node);
-
- cpu_node = of_cpu_device_node_get(cpumask_first(cpu_possible_mask));
+ struct device_node *cpu_node __free(device_node) =
+ of_cpu_device_node_get(cpumask_first(cpu_possible_mask));
if (!cpu_node)
return false;
- state_node = of_get_cpu_state_node(cpu_node, 0);
+ struct device_node *state_node __free(device_node) =
+ of_get_cpu_state_node(cpu_node, 0);
if (!state_node)
return false;
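
The initialization-at-declaration above matters because __free(device_node)
arranges for of_node_put() to run when the variable goes out of scope; declaring
the variable first and assigning it later leaves a window where an early return
would put an uninitialized pointer. A minimal sketch of the pattern, using a
hypothetical helper:

	#include <linux/cleanup.h>
	#include <linux/of.h>

	static bool has_cpus_node(void)
	{
		/* of_node_put() runs automatically when @np leaves scope */
		struct device_node *np __free(device_node) =
			of_find_node_by_path("/cpus");

		return np != NULL;
	}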
diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig
index cf1ba673b8c2..48b7314afdb8 100644
--- a/drivers/cxl/Kconfig
+++ b/drivers/cxl/Kconfig
@@ -114,6 +114,77 @@ config CXL_FEATURES
If unsure say 'n'
+config CXL_EDAC_MEM_FEATURES
+ bool "CXL: EDAC Memory Features"
+ depends on EXPERT
+ depends on CXL_MEM
+ depends on CXL_FEATURES
+ depends on EDAC >= CXL_BUS
+ help
+	  The CXL EDAC memory feature is optional and allows the host to
+	  control the EDAC memory feature configurations of CXL memory
+ expander devices.
+
+ Say 'y' if you have an expert need to change default settings
+ of a memory RAS feature established by the platform/device.
+ Otherwise say 'n'.
+
+config CXL_EDAC_SCRUB
+ bool "Enable CXL Patrol Scrub Control (Patrol Read)"
+ depends on CXL_EDAC_MEM_FEATURES
+ depends on EDAC_SCRUB
+ help
+	  The CXL EDAC scrub control is optional and allows the host to
+ control the scrub feature configurations of CXL memory expander
+ devices.
+
+ When enabled 'cxl_mem' and 'cxl_region' EDAC devices are
+ published with memory scrub control attributes as described by
+ Documentation/ABI/testing/sysfs-edac-scrub.
+
+ Say 'y' if you have an expert need to change default settings
+ of a memory scrub feature established by the platform/device
+ (e.g. scrub rates for the patrol scrub feature).
+ Otherwise say 'n'.
+
+config CXL_EDAC_ECS
+ bool "Enable CXL Error Check Scrub (Repair)"
+ depends on CXL_EDAC_MEM_FEATURES
+ depends on EDAC_ECS
+ help
+	  The CXL EDAC ECS control is optional and allows the host to
+ control the ECS feature configurations of CXL memory expander
+ devices.
+
+ When enabled 'cxl_mem' EDAC devices are published with memory
+ ECS control attributes as described by
+ Documentation/ABI/testing/sysfs-edac-ecs.
+
+ Say 'y' if you have an expert need to change default settings
+ of a memory ECS feature established by the platform/device.
+ Otherwise say 'n'.
+
+config CXL_EDAC_MEM_REPAIR
+ bool "Enable CXL Memory Repair"
+ depends on CXL_EDAC_MEM_FEATURES
+ depends on EDAC_MEM_REPAIR
+ help
+	  The CXL EDAC memory repair control is optional and allows the host
+ to control the memory repair features (e.g. sparing, PPR)
+ configurations of CXL memory expander devices.
+
+	  When enabled, the memory repair feature requires approximately
+	  43KB of additional memory to store CXL DRAM and CXL general
+ media event records.
+
+ When enabled 'cxl_mem' EDAC devices are published with memory
+ repair control attributes as described by
+ Documentation/ABI/testing/sysfs-edac-memory-repair.
+
+ Say 'y' if you have an expert need to change default settings
+ of a memory repair feature established by the platform/device.
+ Otherwise say 'n'.
+
config CXL_PORT
default CXL_BUS
tristate
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index cb14829bb9be..a1a99ec3f12c 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -11,8 +11,6 @@
#include "cxlpci.h"
#include "cxl.h"
-#define CXL_RCRB_SIZE SZ_8K
-
struct cxl_cxims_data {
int nr_maps;
u64 xormaps[] __counted_by(nr_maps);
@@ -421,7 +419,15 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
rc = cxl_decoder_add(cxld, target_map);
if (rc)
return rc;
- return cxl_root_decoder_autoremove(dev, no_free_ptr(cxlrd));
+
+ rc = cxl_root_decoder_autoremove(dev, no_free_ptr(cxlrd));
+ if (rc)
+ return rc;
+
+ dev_dbg(root_port->dev.parent, "%s added to %s\n",
+ dev_name(&cxld->dev), dev_name(&root_port->dev));
+
+ return 0;
}
static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
@@ -479,7 +485,11 @@ static int cxl_get_chbs_iter(union acpi_subtable_headers *header, void *arg,
chbs = (struct acpi_cedt_chbs *) header;
if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11 &&
- chbs->length != CXL_RCRB_SIZE)
+ chbs->length != ACPI_CEDT_CHBS_LENGTH_CXL11)
+ return 0;
+
+ if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL20 &&
+ chbs->length != ACPI_CEDT_CHBS_LENGTH_CXL20)
return 0;
if (!chbs->base)
@@ -739,10 +749,10 @@ static void remove_cxl_resources(void *data)
* expanding its boundaries to ensure that any conflicting resources become
* children. If a window is expanded it may then conflict with a another window
* entry and require the window to be truncated or trimmed. Consider this
- * situation:
+ * situation::
*
- * |-- "CXL Window 0" --||----- "CXL Window 1" -----|
- * |--------------- "System RAM" -------------|
+ * |-- "CXL Window 0" --||----- "CXL Window 1" -----|
+ * |--------------- "System RAM" -------------|
*
 * ...where platform firmware has established a System RAM resource across 2
* windows, but has left some portion of window 1 for dynamic CXL region
diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile
index 086df97a0fcf..79e2ef81fde8 100644
--- a/drivers/cxl/core/Makefile
+++ b/drivers/cxl/core/Makefile
@@ -20,3 +20,4 @@ cxl_core-$(CONFIG_TRACING) += trace.o
cxl_core-$(CONFIG_CXL_REGION) += region.o
cxl_core-$(CONFIG_CXL_MCE) += mce.o
cxl_core-$(CONFIG_CXL_FEATURES) += features.o
+cxl_core-$(CONFIG_CXL_EDAC_MEM_FEATURES) += edac.o
diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
index edb4f41eeacc..0ccef2f2a26a 100644
--- a/drivers/cxl/core/cdat.c
+++ b/drivers/cxl/core/cdat.c
@@ -28,7 +28,7 @@ static u32 cdat_normalize(u16 entry, u64 base, u8 type)
*/
if (entry == 0xffff || !entry)
return 0;
- else if (base > (UINT_MAX / (entry)))
+ if (base > (UINT_MAX / (entry)))
return 0;
/*
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index 17b692eb3257..29b61828a847 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -76,7 +76,7 @@ void __iomem *devm_cxl_iomap_block(struct device *dev, resource_size_t addr,
struct dentry *cxl_debugfs_create_dir(const char *dir);
int cxl_dpa_set_part(struct cxl_endpoint_decoder *cxled,
enum cxl_partition_mode mode);
-int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size);
+int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size);
int cxl_dpa_free(struct cxl_endpoint_decoder *cxled);
resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled);
resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled);
@@ -124,6 +124,8 @@ int cxl_acpi_get_extended_linear_cache_size(struct resource *backing_res,
int nid, resource_size_t *size);
#ifdef CONFIG_CXL_FEATURES
+struct cxl_feat_entry *
+cxl_feature_info(struct cxl_features_state *cxlfs, const uuid_t *uuid);
size_t cxl_get_feature(struct cxl_mailbox *cxl_mbox, const uuid_t *feat_uuid,
enum cxl_get_feat_selection selection,
void *feat_out, size_t feat_out_size, u16 offset,
diff --git a/drivers/cxl/core/edac.c b/drivers/cxl/core/edac.c
new file mode 100644
index 000000000000..2cbc664e5d62
--- /dev/null
+++ b/drivers/cxl/core/edac.c
@@ -0,0 +1,2102 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CXL EDAC memory feature driver.
+ *
+ * Copyright (c) 2024-2025 HiSilicon Limited.
+ *
+ * - Supports functions to configure the EDAC features of
+ *   CXL memory devices.
+ * - Registers with the EDAC device subsystem driver to expose
+ *   the feature sysfs attributes to the user for configuring
+ *   CXL memory RAS features.
+ */
+
+#include <linux/cleanup.h>
+#include <linux/edac.h>
+#include <linux/limits.h>
+#include <linux/unaligned.h>
+#include <linux/xarray.h>
+#include <cxl/features.h>
+#include <cxl.h>
+#include <cxlmem.h>
+#include "core.h"
+#include "trace.h"
+
+#define CXL_NR_EDAC_DEV_FEATURES 7
+
+#define CXL_SCRUB_NO_REGION -1
+
+struct cxl_patrol_scrub_context {
+ u8 instance;
+ u16 get_feat_size;
+ u16 set_feat_size;
+ u8 get_version;
+ u8 set_version;
+ u16 effects;
+ struct cxl_memdev *cxlmd;
+ struct cxl_region *cxlr;
+};
+
+/*
+ * See CXL spec rev 3.2 @8.2.10.9.11.1 Table 8-222 Device Patrol Scrub Control
+ * Feature Readable Attributes.
+ */
+struct cxl_scrub_rd_attrbs {
+ u8 scrub_cycle_cap;
+ __le16 scrub_cycle_hours;
+ u8 scrub_flags;
+} __packed;
+
+/*
+ * See CXL spec rev 3.2 @8.2.10.9.11.1 Table 8-223 Device Patrol Scrub Control
+ * Feature Writable Attributes.
+ */
+struct cxl_scrub_wr_attrbs {
+ u8 scrub_cycle_hours;
+ u8 scrub_flags;
+} __packed;
+
+#define CXL_SCRUB_CONTROL_CHANGEABLE BIT(0)
+#define CXL_SCRUB_CONTROL_REALTIME BIT(1)
+#define CXL_SCRUB_CONTROL_CYCLE_MASK GENMASK(7, 0)
+#define CXL_SCRUB_CONTROL_MIN_CYCLE_MASK GENMASK(15, 8)
+#define CXL_SCRUB_CONTROL_ENABLE BIT(0)
+
+#define CXL_GET_SCRUB_CYCLE_CHANGEABLE(cap) \
+ FIELD_GET(CXL_SCRUB_CONTROL_CHANGEABLE, cap)
+#define CXL_GET_SCRUB_CYCLE(cycle) \
+ FIELD_GET(CXL_SCRUB_CONTROL_CYCLE_MASK, cycle)
+#define CXL_GET_SCRUB_MIN_CYCLE(cycle) \
+ FIELD_GET(CXL_SCRUB_CONTROL_MIN_CYCLE_MASK, cycle)
+#define CXL_GET_SCRUB_EN_STS(flags) FIELD_GET(CXL_SCRUB_CONTROL_ENABLE, flags)
+
+#define CXL_SET_SCRUB_CYCLE(cycle) \
+ FIELD_PREP(CXL_SCRUB_CONTROL_CYCLE_MASK, cycle)
+#define CXL_SET_SCRUB_EN(en) FIELD_PREP(CXL_SCRUB_CONTROL_ENABLE, en)
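+
+/*
+ * Worked example (illustrative values): a scrub_cycle_hours reading of
+ * 0x0C18 decodes, per the masks above, as a current scrub cycle of
+ * 0x18 (24 hours) and a minimum supported cycle of 0x0C (12 hours).
+ */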
+
+static int cxl_mem_scrub_get_attrbs(struct cxl_mailbox *cxl_mbox, u8 *cap,
+ u16 *cycle, u8 *flags, u8 *min_cycle)
+{
+ size_t rd_data_size = sizeof(struct cxl_scrub_rd_attrbs);
+ size_t data_size;
+ struct cxl_scrub_rd_attrbs *rd_attrbs __free(kfree) =
+ kzalloc(rd_data_size, GFP_KERNEL);
+ if (!rd_attrbs)
+ return -ENOMEM;
+
+ data_size = cxl_get_feature(cxl_mbox, &CXL_FEAT_PATROL_SCRUB_UUID,
+ CXL_GET_FEAT_SEL_CURRENT_VALUE, rd_attrbs,
+ rd_data_size, 0, NULL);
+ if (!data_size)
+ return -EIO;
+
+ *cap = rd_attrbs->scrub_cycle_cap;
+ *cycle = le16_to_cpu(rd_attrbs->scrub_cycle_hours);
+ *flags = rd_attrbs->scrub_flags;
+ if (min_cycle)
+ *min_cycle = CXL_GET_SCRUB_MIN_CYCLE(*cycle);
+
+ return 0;
+}
+
+static int cxl_scrub_get_attrbs(struct cxl_patrol_scrub_context *cxl_ps_ctx,
+ u8 *cap, u16 *cycle, u8 *flags, u8 *min_cycle)
+{
+ struct cxl_mailbox *cxl_mbox;
+ u8 min_scrub_cycle = U8_MAX;
+ struct cxl_region_params *p;
+ struct cxl_memdev *cxlmd;
+ struct cxl_region *cxlr;
+ int i, ret;
+
+ if (!cxl_ps_ctx->cxlr) {
+ cxl_mbox = &cxl_ps_ctx->cxlmd->cxlds->cxl_mbox;
+ return cxl_mem_scrub_get_attrbs(cxl_mbox, cap, cycle,
+ flags, min_cycle);
+ }
+
+ struct rw_semaphore *region_lock __free(rwsem_read_release) =
+ rwsem_read_intr_acquire(&cxl_region_rwsem);
+ if (!region_lock)
+ return -EINTR;
+
+ cxlr = cxl_ps_ctx->cxlr;
+ p = &cxlr->params;
+
+ for (i = 0; i < p->nr_targets; i++) {
+ struct cxl_endpoint_decoder *cxled = p->targets[i];
+
+ cxlmd = cxled_to_memdev(cxled);
+ cxl_mbox = &cxlmd->cxlds->cxl_mbox;
+ ret = cxl_mem_scrub_get_attrbs(cxl_mbox, cap, cycle, flags,
+ min_cycle);
+ if (ret)
+ return ret;
+
+ if (min_cycle)
+ min_scrub_cycle = min(*min_cycle, min_scrub_cycle);
+ }
+
+ if (min_cycle)
+ *min_cycle = min_scrub_cycle;
+
+ return 0;
+}
+
+static int cxl_scrub_set_attrbs_region(struct device *dev,
+ struct cxl_patrol_scrub_context *cxl_ps_ctx,
+ u8 cycle, u8 flags)
+{
+ struct cxl_scrub_wr_attrbs wr_attrbs;
+ struct cxl_mailbox *cxl_mbox;
+ struct cxl_region_params *p;
+ struct cxl_memdev *cxlmd;
+ struct cxl_region *cxlr;
+ int ret, i;
+
+ struct rw_semaphore *region_lock __free(rwsem_read_release) =
+ rwsem_read_intr_acquire(&cxl_region_rwsem);
+ if (!region_lock)
+ return -EINTR;
+
+ cxlr = cxl_ps_ctx->cxlr;
+ p = &cxlr->params;
+ wr_attrbs.scrub_cycle_hours = cycle;
+ wr_attrbs.scrub_flags = flags;
+
+ for (i = 0; i < p->nr_targets; i++) {
+ struct cxl_endpoint_decoder *cxled = p->targets[i];
+
+ cxlmd = cxled_to_memdev(cxled);
+ cxl_mbox = &cxlmd->cxlds->cxl_mbox;
+ ret = cxl_set_feature(cxl_mbox, &CXL_FEAT_PATROL_SCRUB_UUID,
+ cxl_ps_ctx->set_version, &wr_attrbs,
+ sizeof(wr_attrbs),
+ CXL_SET_FEAT_FLAG_DATA_SAVED_ACROSS_RESET,
+ 0, NULL);
+ if (ret)
+ return ret;
+
+ if (cycle != cxlmd->scrub_cycle) {
+ if (cxlmd->scrub_region_id != CXL_SCRUB_NO_REGION)
+ dev_info(dev,
+ "Device scrub rate(%d hours) set by region%d rate overwritten by region%d scrub rate(%d hours)\n",
+ cxlmd->scrub_cycle,
+ cxlmd->scrub_region_id, cxlr->id,
+ cycle);
+
+ cxlmd->scrub_cycle = cycle;
+ cxlmd->scrub_region_id = cxlr->id;
+ }
+ }
+
+ return 0;
+}
+
+static int cxl_scrub_set_attrbs_device(struct device *dev,
+ struct cxl_patrol_scrub_context *cxl_ps_ctx,
+ u8 cycle, u8 flags)
+{
+ struct cxl_scrub_wr_attrbs wr_attrbs;
+ struct cxl_mailbox *cxl_mbox;
+ struct cxl_memdev *cxlmd;
+ int ret;
+
+ wr_attrbs.scrub_cycle_hours = cycle;
+ wr_attrbs.scrub_flags = flags;
+
+ cxlmd = cxl_ps_ctx->cxlmd;
+ cxl_mbox = &cxlmd->cxlds->cxl_mbox;
+ ret = cxl_set_feature(cxl_mbox, &CXL_FEAT_PATROL_SCRUB_UUID,
+ cxl_ps_ctx->set_version, &wr_attrbs,
+ sizeof(wr_attrbs),
+ CXL_SET_FEAT_FLAG_DATA_SAVED_ACROSS_RESET, 0,
+ NULL);
+ if (ret)
+ return ret;
+
+ if (cycle != cxlmd->scrub_cycle) {
+ if (cxlmd->scrub_region_id != CXL_SCRUB_NO_REGION)
+ dev_info(dev,
+ "Device scrub rate(%d hours) set by region%d rate overwritten with device local scrub rate(%d hours)\n",
+ cxlmd->scrub_cycle, cxlmd->scrub_region_id,
+ cycle);
+
+ cxlmd->scrub_cycle = cycle;
+ cxlmd->scrub_region_id = CXL_SCRUB_NO_REGION;
+ }
+
+ return 0;
+}
+
+static int cxl_scrub_set_attrbs(struct device *dev,
+ struct cxl_patrol_scrub_context *cxl_ps_ctx,
+ u8 cycle, u8 flags)
+{
+ if (cxl_ps_ctx->cxlr)
+ return cxl_scrub_set_attrbs_region(dev, cxl_ps_ctx, cycle, flags);
+
+ return cxl_scrub_set_attrbs_device(dev, cxl_ps_ctx, cycle, flags);
+}
+
+static int cxl_patrol_scrub_get_enabled_bg(struct device *dev, void *drv_data,
+ bool *enabled)
+{
+ struct cxl_patrol_scrub_context *ctx = drv_data;
+ u8 cap, flags;
+ u16 cycle;
+ int ret;
+
+ ret = cxl_scrub_get_attrbs(ctx, &cap, &cycle, &flags, NULL);
+ if (ret)
+ return ret;
+
+ *enabled = CXL_GET_SCRUB_EN_STS(flags);
+
+ return 0;
+}
+
+static int cxl_patrol_scrub_set_enabled_bg(struct device *dev, void *drv_data,
+ bool enable)
+{
+ struct cxl_patrol_scrub_context *ctx = drv_data;
+ u8 cap, flags, wr_cycle;
+ u16 rd_cycle;
+ int ret;
+
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ ret = cxl_scrub_get_attrbs(ctx, &cap, &rd_cycle, &flags, NULL);
+ if (ret)
+ return ret;
+
+ wr_cycle = CXL_GET_SCRUB_CYCLE(rd_cycle);
+ flags = CXL_SET_SCRUB_EN(enable);
+
+ return cxl_scrub_set_attrbs(dev, ctx, wr_cycle, flags);
+}
+
+static int cxl_patrol_scrub_get_min_scrub_cycle(struct device *dev,
+ void *drv_data, u32 *min)
+{
+ struct cxl_patrol_scrub_context *ctx = drv_data;
+ u8 cap, flags, min_cycle;
+ u16 cycle;
+ int ret;
+
+ ret = cxl_scrub_get_attrbs(ctx, &cap, &cycle, &flags, &min_cycle);
+ if (ret)
+ return ret;
+
+ *min = min_cycle * 3600;
+
+ return 0;
+}
+
+static int cxl_patrol_scrub_get_max_scrub_cycle(struct device *dev,
+ void *drv_data, u32 *max)
+{
+ *max = U8_MAX * 3600; /* Max set by register size */
+
+ return 0;
+}
+
+static int cxl_patrol_scrub_get_scrub_cycle(struct device *dev, void *drv_data,
+ u32 *scrub_cycle_secs)
+{
+ struct cxl_patrol_scrub_context *ctx = drv_data;
+ u8 cap, flags;
+ u16 cycle;
+ int ret;
+
+ ret = cxl_scrub_get_attrbs(ctx, &cap, &cycle, &flags, NULL);
+ if (ret)
+ return ret;
+
+ *scrub_cycle_secs = CXL_GET_SCRUB_CYCLE(cycle) * 3600;
+
+ return 0;
+}
+
+static int cxl_patrol_scrub_set_scrub_cycle(struct device *dev, void *drv_data,
+ u32 scrub_cycle_secs)
+{
+ struct cxl_patrol_scrub_context *ctx = drv_data;
+ u8 scrub_cycle_hours = scrub_cycle_secs / 3600;
+ u8 cap, wr_cycle, flags, min_cycle;
+ u16 rd_cycle;
+ int ret;
+
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ ret = cxl_scrub_get_attrbs(ctx, &cap, &rd_cycle, &flags, &min_cycle);
+ if (ret)
+ return ret;
+
+ if (!CXL_GET_SCRUB_CYCLE_CHANGEABLE(cap))
+ return -EOPNOTSUPP;
+
+ if (scrub_cycle_hours < min_cycle) {
+ dev_dbg(dev, "Invalid CXL patrol scrub cycle(%d) to set\n",
+ scrub_cycle_hours);
+ dev_dbg(dev,
+			"Minimum supported CXL patrol scrub cycle in hours: %d\n",
+ min_cycle);
+ return -EINVAL;
+ }
+ wr_cycle = CXL_SET_SCRUB_CYCLE(scrub_cycle_hours);
+
+ return cxl_scrub_set_attrbs(dev, ctx, wr_cycle, flags);
+}
+
+static const struct edac_scrub_ops cxl_ps_scrub_ops = {
+ .get_enabled_bg = cxl_patrol_scrub_get_enabled_bg,
+ .set_enabled_bg = cxl_patrol_scrub_set_enabled_bg,
+ .get_min_cycle = cxl_patrol_scrub_get_min_scrub_cycle,
+ .get_max_cycle = cxl_patrol_scrub_get_max_scrub_cycle,
+ .get_cycle_duration = cxl_patrol_scrub_get_scrub_cycle,
+ .set_cycle_duration = cxl_patrol_scrub_set_scrub_cycle,
+};
+
+static int cxl_memdev_scrub_init(struct cxl_memdev *cxlmd,
+ struct edac_dev_feature *ras_feature,
+ u8 scrub_inst)
+{
+ struct cxl_patrol_scrub_context *cxl_ps_ctx;
+ struct cxl_feat_entry *feat_entry;
+ u8 cap, flags;
+ u16 cycle;
+ int rc;
+
+ feat_entry = cxl_feature_info(to_cxlfs(cxlmd->cxlds),
+ &CXL_FEAT_PATROL_SCRUB_UUID);
+ if (IS_ERR(feat_entry))
+ return -EOPNOTSUPP;
+
+ if (!(le32_to_cpu(feat_entry->flags) & CXL_FEATURE_F_CHANGEABLE))
+ return -EOPNOTSUPP;
+
+ cxl_ps_ctx = devm_kzalloc(&cxlmd->dev, sizeof(*cxl_ps_ctx), GFP_KERNEL);
+ if (!cxl_ps_ctx)
+ return -ENOMEM;
+
+ *cxl_ps_ctx = (struct cxl_patrol_scrub_context){
+ .get_feat_size = le16_to_cpu(feat_entry->get_feat_size),
+ .set_feat_size = le16_to_cpu(feat_entry->set_feat_size),
+ .get_version = feat_entry->get_feat_ver,
+ .set_version = feat_entry->set_feat_ver,
+ .effects = le16_to_cpu(feat_entry->effects),
+ .instance = scrub_inst,
+ .cxlmd = cxlmd,
+ };
+
+ rc = cxl_mem_scrub_get_attrbs(&cxlmd->cxlds->cxl_mbox, &cap, &cycle,
+ &flags, NULL);
+ if (rc)
+ return rc;
+
+ cxlmd->scrub_cycle = CXL_GET_SCRUB_CYCLE(cycle);
+ cxlmd->scrub_region_id = CXL_SCRUB_NO_REGION;
+
+ ras_feature->ft_type = RAS_FEAT_SCRUB;
+ ras_feature->instance = cxl_ps_ctx->instance;
+ ras_feature->scrub_ops = &cxl_ps_scrub_ops;
+ ras_feature->ctx = cxl_ps_ctx;
+
+ return 0;
+}
+
+static int cxl_region_scrub_init(struct cxl_region *cxlr,
+ struct edac_dev_feature *ras_feature,
+ u8 scrub_inst)
+{
+ struct cxl_patrol_scrub_context *cxl_ps_ctx;
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_feat_entry *feat_entry = NULL;
+ struct cxl_memdev *cxlmd;
+ u8 cap, flags;
+ u16 cycle;
+ int i, rc;
+
+ /*
+	 * The cxl_region_rwsem must be held if the code below is used in a
+	 * context other than the region probe path shown here.
+ */
+ for (i = 0; i < p->nr_targets; i++) {
+ struct cxl_endpoint_decoder *cxled = p->targets[i];
+
+ cxlmd = cxled_to_memdev(cxled);
+ feat_entry = cxl_feature_info(to_cxlfs(cxlmd->cxlds),
+ &CXL_FEAT_PATROL_SCRUB_UUID);
+ if (IS_ERR(feat_entry))
+ return -EOPNOTSUPP;
+
+ if (!(le32_to_cpu(feat_entry->flags) &
+ CXL_FEATURE_F_CHANGEABLE))
+ return -EOPNOTSUPP;
+
+ rc = cxl_mem_scrub_get_attrbs(&cxlmd->cxlds->cxl_mbox, &cap,
+ &cycle, &flags, NULL);
+ if (rc)
+ return rc;
+
+ cxlmd->scrub_cycle = CXL_GET_SCRUB_CYCLE(cycle);
+ cxlmd->scrub_region_id = CXL_SCRUB_NO_REGION;
+ }
+
+ cxl_ps_ctx = devm_kzalloc(&cxlr->dev, sizeof(*cxl_ps_ctx), GFP_KERNEL);
+ if (!cxl_ps_ctx)
+ return -ENOMEM;
+
+ *cxl_ps_ctx = (struct cxl_patrol_scrub_context){
+ .get_feat_size = le16_to_cpu(feat_entry->get_feat_size),
+ .set_feat_size = le16_to_cpu(feat_entry->set_feat_size),
+ .get_version = feat_entry->get_feat_ver,
+ .set_version = feat_entry->set_feat_ver,
+ .effects = le16_to_cpu(feat_entry->effects),
+ .instance = scrub_inst,
+ .cxlr = cxlr,
+ };
+
+ ras_feature->ft_type = RAS_FEAT_SCRUB;
+ ras_feature->instance = cxl_ps_ctx->instance;
+ ras_feature->scrub_ops = &cxl_ps_scrub_ops;
+ ras_feature->ctx = cxl_ps_ctx;
+
+ return 0;
+}
+
+struct cxl_ecs_context {
+ u16 num_media_frus;
+ u16 get_feat_size;
+ u16 set_feat_size;
+ u8 get_version;
+ u8 set_version;
+ u16 effects;
+ struct cxl_memdev *cxlmd;
+};
+
+/*
+ * See CXL spec rev 3.2 @8.2.10.9.11.2 Table 8-225 DDR5 ECS Control Feature
+ * Readable Attributes.
+ */
+struct cxl_ecs_fru_rd_attrbs {
+ u8 ecs_cap;
+ __le16 ecs_config;
+ u8 ecs_flags;
+} __packed;
+
+struct cxl_ecs_rd_attrbs {
+ u8 ecs_log_cap;
+ struct cxl_ecs_fru_rd_attrbs fru_attrbs[];
+} __packed;
+
+/*
+ * See CXL spec rev 3.2 @8.2.10.9.11.2 Table 8-226 DDR5 ECS Control Feature
+ * Writable Attributes.
+ */
+struct cxl_ecs_fru_wr_attrbs {
+ __le16 ecs_config;
+} __packed;
+
+struct cxl_ecs_wr_attrbs {
+ u8 ecs_log_cap;
+ struct cxl_ecs_fru_wr_attrbs fru_attrbs[];
+} __packed;
+
+#define CXL_ECS_LOG_ENTRY_TYPE_MASK GENMASK(1, 0)
+#define CXL_ECS_REALTIME_REPORT_CAP_MASK BIT(0)
+#define CXL_ECS_THRESHOLD_COUNT_MASK GENMASK(2, 0)
+#define CXL_ECS_COUNT_MODE_MASK BIT(3)
+#define CXL_ECS_RESET_COUNTER_MASK BIT(4)
+#define CXL_ECS_RESET_COUNTER 1
+
+enum {
+ ECS_THRESHOLD_256 = 256,
+ ECS_THRESHOLD_1024 = 1024,
+ ECS_THRESHOLD_4096 = 4096,
+};
+
+enum {
+ ECS_THRESHOLD_IDX_256 = 3,
+ ECS_THRESHOLD_IDX_1024 = 4,
+ ECS_THRESHOLD_IDX_4096 = 5,
+};
+
+static const u16 ecs_supp_threshold[] = {
+ [ECS_THRESHOLD_IDX_256] = 256,
+ [ECS_THRESHOLD_IDX_1024] = 1024,
+ [ECS_THRESHOLD_IDX_4096] = 4096,
+};
+
+enum {
+ ECS_LOG_ENTRY_TYPE_DRAM = 0x0,
+ ECS_LOG_ENTRY_TYPE_MEM_MEDIA_FRU = 0x1,
+};
+
+enum cxl_ecs_count_mode {
+ ECS_MODE_COUNTS_ROWS = 0,
+ ECS_MODE_COUNTS_CODEWORDS = 1,
+};
+
+static int cxl_mem_ecs_get_attrbs(struct device *dev,
+ struct cxl_ecs_context *cxl_ecs_ctx,
+ int fru_id, u8 *log_cap, u16 *config)
+{
+ struct cxl_memdev *cxlmd = cxl_ecs_ctx->cxlmd;
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
+ struct cxl_ecs_fru_rd_attrbs *fru_rd_attrbs;
+ size_t rd_data_size;
+ size_t data_size;
+
+ rd_data_size = cxl_ecs_ctx->get_feat_size;
+
+ struct cxl_ecs_rd_attrbs *rd_attrbs __free(kvfree) =
+ kvzalloc(rd_data_size, GFP_KERNEL);
+ if (!rd_attrbs)
+ return -ENOMEM;
+
+ data_size = cxl_get_feature(cxl_mbox, &CXL_FEAT_ECS_UUID,
+ CXL_GET_FEAT_SEL_CURRENT_VALUE, rd_attrbs,
+ rd_data_size, 0, NULL);
+ if (!data_size)
+ return -EIO;
+
+ fru_rd_attrbs = rd_attrbs->fru_attrbs;
+ *log_cap = rd_attrbs->ecs_log_cap;
+ *config = le16_to_cpu(fru_rd_attrbs[fru_id].ecs_config);
+
+ return 0;
+}
+
+static int cxl_mem_ecs_set_attrbs(struct device *dev,
+ struct cxl_ecs_context *cxl_ecs_ctx,
+ int fru_id, u8 log_cap, u16 config)
+{
+ struct cxl_memdev *cxlmd = cxl_ecs_ctx->cxlmd;
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
+ struct cxl_ecs_fru_rd_attrbs *fru_rd_attrbs;
+ struct cxl_ecs_fru_wr_attrbs *fru_wr_attrbs;
+ size_t rd_data_size, wr_data_size;
+ u16 num_media_frus, count;
+ size_t data_size;
+
+ num_media_frus = cxl_ecs_ctx->num_media_frus;
+ rd_data_size = cxl_ecs_ctx->get_feat_size;
+ wr_data_size = cxl_ecs_ctx->set_feat_size;
+ struct cxl_ecs_rd_attrbs *rd_attrbs __free(kvfree) =
+ kvzalloc(rd_data_size, GFP_KERNEL);
+ if (!rd_attrbs)
+ return -ENOMEM;
+
+ data_size = cxl_get_feature(cxl_mbox, &CXL_FEAT_ECS_UUID,
+ CXL_GET_FEAT_SEL_CURRENT_VALUE, rd_attrbs,
+ rd_data_size, 0, NULL);
+ if (!data_size)
+ return -EIO;
+
+ struct cxl_ecs_wr_attrbs *wr_attrbs __free(kvfree) =
+ kvzalloc(wr_data_size, GFP_KERNEL);
+ if (!wr_attrbs)
+ return -ENOMEM;
+
+ /*
+ * Fill writable attributes from the current attributes read
+ * for all the media FRUs.
+ */
+ fru_rd_attrbs = rd_attrbs->fru_attrbs;
+ fru_wr_attrbs = wr_attrbs->fru_attrbs;
+ wr_attrbs->ecs_log_cap = log_cap;
+ for (count = 0; count < num_media_frus; count++)
+ fru_wr_attrbs[count].ecs_config =
+ fru_rd_attrbs[count].ecs_config;
+
+ fru_wr_attrbs[fru_id].ecs_config = cpu_to_le16(config);
+
+ return cxl_set_feature(cxl_mbox, &CXL_FEAT_ECS_UUID,
+ cxl_ecs_ctx->set_version, wr_attrbs,
+ wr_data_size,
+ CXL_SET_FEAT_FLAG_DATA_SAVED_ACROSS_RESET,
+ 0, NULL);
+}
+
+static u8 cxl_get_ecs_log_entry_type(u8 log_cap, u16 config)
+{
+ return FIELD_GET(CXL_ECS_LOG_ENTRY_TYPE_MASK, log_cap);
+}
+
+static u16 cxl_get_ecs_threshold(u8 log_cap, u16 config)
+{
+ u8 index = FIELD_GET(CXL_ECS_THRESHOLD_COUNT_MASK, config);
+
+ return ecs_supp_threshold[index];
+}
+
+static u8 cxl_get_ecs_count_mode(u8 log_cap, u16 config)
+{
+ return FIELD_GET(CXL_ECS_COUNT_MODE_MASK, config);
+}
+
+#define CXL_ECS_GET_ATTR(attrb) \
+ static int cxl_ecs_get_##attrb(struct device *dev, void *drv_data, \
+ int fru_id, u32 *val) \
+ { \
+ struct cxl_ecs_context *ctx = drv_data; \
+ u8 log_cap; \
+ u16 config; \
+ int ret; \
+ \
+ ret = cxl_mem_ecs_get_attrbs(dev, ctx, fru_id, &log_cap, \
+ &config); \
+ if (ret) \
+ return ret; \
+ \
+ *val = cxl_get_ecs_##attrb(log_cap, config); \
+ \
+ return 0; \
+ }
+
+CXL_ECS_GET_ATTR(log_entry_type)
+CXL_ECS_GET_ATTR(count_mode)
+CXL_ECS_GET_ATTR(threshold)
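+
+/*
+ * For reference, CXL_ECS_GET_ATTR(threshold) above expands to a function
+ * of the form (sketch):
+ *
+ *	static int cxl_ecs_get_threshold(struct device *dev, void *drv_data,
+ *					 int fru_id, u32 *val)
+ *	{
+ *		struct cxl_ecs_context *ctx = drv_data;
+ *		u8 log_cap;
+ *		u16 config;
+ *		int ret;
+ *
+ *		ret = cxl_mem_ecs_get_attrbs(dev, ctx, fru_id, &log_cap, &config);
+ *		if (ret)
+ *			return ret;
+ *
+ *		*val = cxl_get_ecs_threshold(log_cap, config);
+ *		return 0;
+ *	}
+ */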
+
+static int cxl_set_ecs_log_entry_type(struct device *dev, u8 *log_cap,
+ u16 *config, u32 val)
+{
+ if (val != ECS_LOG_ENTRY_TYPE_DRAM &&
+ val != ECS_LOG_ENTRY_TYPE_MEM_MEDIA_FRU)
+ return -EINVAL;
+
+ *log_cap = FIELD_PREP(CXL_ECS_LOG_ENTRY_TYPE_MASK, val);
+
+ return 0;
+}
+
+static int cxl_set_ecs_threshold(struct device *dev, u8 *log_cap, u16 *config,
+ u32 val)
+{
+ *config &= ~CXL_ECS_THRESHOLD_COUNT_MASK;
+
+ switch (val) {
+ case ECS_THRESHOLD_256:
+ *config |= FIELD_PREP(CXL_ECS_THRESHOLD_COUNT_MASK,
+ ECS_THRESHOLD_IDX_256);
+ break;
+ case ECS_THRESHOLD_1024:
+ *config |= FIELD_PREP(CXL_ECS_THRESHOLD_COUNT_MASK,
+ ECS_THRESHOLD_IDX_1024);
+ break;
+ case ECS_THRESHOLD_4096:
+ *config |= FIELD_PREP(CXL_ECS_THRESHOLD_COUNT_MASK,
+ ECS_THRESHOLD_IDX_4096);
+ break;
+ default:
+ dev_dbg(dev, "Invalid CXL ECS threshold count(%d) to set\n",
+ val);
+ dev_dbg(dev, "Supported ECS threshold counts: %u, %u, %u\n",
+ ECS_THRESHOLD_256, ECS_THRESHOLD_1024,
+ ECS_THRESHOLD_4096);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cxl_set_ecs_count_mode(struct device *dev, u8 *log_cap, u16 *config,
+ u32 val)
+{
+ if (val != ECS_MODE_COUNTS_ROWS && val != ECS_MODE_COUNTS_CODEWORDS) {
+ dev_dbg(dev, "Invalid CXL ECS scrub mode(%d) to set\n", val);
+ dev_dbg(dev,
+			"Supported ECS modes: 0: ECS counts rows with errors, 1: ECS counts codewords with errors\n");
+ return -EINVAL;
+ }
+
+ *config &= ~CXL_ECS_COUNT_MODE_MASK;
+ *config |= FIELD_PREP(CXL_ECS_COUNT_MODE_MASK, val);
+
+ return 0;
+}
+
+static int cxl_set_ecs_reset_counter(struct device *dev, u8 *log_cap,
+ u16 *config, u32 val)
+{
+ if (val != CXL_ECS_RESET_COUNTER)
+ return -EINVAL;
+
+ *config &= ~CXL_ECS_RESET_COUNTER_MASK;
+ *config |= FIELD_PREP(CXL_ECS_RESET_COUNTER_MASK, val);
+
+ return 0;
+}
+
+#define CXL_ECS_SET_ATTR(attrb) \
+ static int cxl_ecs_set_##attrb(struct device *dev, void *drv_data, \
+ int fru_id, u32 val) \
+ { \
+ struct cxl_ecs_context *ctx = drv_data; \
+ u8 log_cap; \
+ u16 config; \
+ int ret; \
+ \
+ if (!capable(CAP_SYS_RAWIO)) \
+ return -EPERM; \
+ \
+ ret = cxl_mem_ecs_get_attrbs(dev, ctx, fru_id, &log_cap, \
+ &config); \
+ if (ret) \
+ return ret; \
+ \
+ ret = cxl_set_ecs_##attrb(dev, &log_cap, &config, val); \
+ if (ret) \
+ return ret; \
+ \
+ return cxl_mem_ecs_set_attrbs(dev, ctx, fru_id, log_cap, \
+ config); \
+ }
+CXL_ECS_SET_ATTR(log_entry_type)
+CXL_ECS_SET_ATTR(count_mode)
+CXL_ECS_SET_ATTR(reset_counter)
+CXL_ECS_SET_ATTR(threshold)
+
+static const struct edac_ecs_ops cxl_ecs_ops = {
+ .get_log_entry_type = cxl_ecs_get_log_entry_type,
+ .set_log_entry_type = cxl_ecs_set_log_entry_type,
+ .get_mode = cxl_ecs_get_count_mode,
+ .set_mode = cxl_ecs_set_count_mode,
+ .reset = cxl_ecs_set_reset_counter,
+ .get_threshold = cxl_ecs_get_threshold,
+ .set_threshold = cxl_ecs_set_threshold,
+};
+
+static int cxl_memdev_ecs_init(struct cxl_memdev *cxlmd,
+ struct edac_dev_feature *ras_feature)
+{
+ struct cxl_ecs_context *cxl_ecs_ctx;
+ struct cxl_feat_entry *feat_entry;
+ int num_media_frus;
+
+ feat_entry =
+ cxl_feature_info(to_cxlfs(cxlmd->cxlds), &CXL_FEAT_ECS_UUID);
+ if (IS_ERR(feat_entry))
+ return -EOPNOTSUPP;
+
+ if (!(le32_to_cpu(feat_entry->flags) & CXL_FEATURE_F_CHANGEABLE))
+ return -EOPNOTSUPP;
+
+ num_media_frus = (le16_to_cpu(feat_entry->get_feat_size) -
+ sizeof(struct cxl_ecs_rd_attrbs)) /
+ sizeof(struct cxl_ecs_fru_rd_attrbs);
+ if (!num_media_frus)
+ return -EOPNOTSUPP;
+
+ cxl_ecs_ctx =
+ devm_kzalloc(&cxlmd->dev, sizeof(*cxl_ecs_ctx), GFP_KERNEL);
+ if (!cxl_ecs_ctx)
+ return -ENOMEM;
+
+ *cxl_ecs_ctx = (struct cxl_ecs_context){
+ .get_feat_size = le16_to_cpu(feat_entry->get_feat_size),
+ .set_feat_size = le16_to_cpu(feat_entry->set_feat_size),
+ .get_version = feat_entry->get_feat_ver,
+ .set_version = feat_entry->set_feat_ver,
+ .effects = le16_to_cpu(feat_entry->effects),
+ .num_media_frus = num_media_frus,
+ .cxlmd = cxlmd,
+ };
+
+ ras_feature->ft_type = RAS_FEAT_ECS;
+ ras_feature->ecs_ops = &cxl_ecs_ops;
+ ras_feature->ctx = cxl_ecs_ctx;
+ ras_feature->ecs_info.num_media_frus = num_media_frus;
+
+ return 0;
+}
+
+/*
+ * Perform Maintenance CXL 3.2 Spec 8.2.10.7.1
+ */
+
+/*
+ * Perform Maintenance input payload
+ * CXL rev 3.2 section 8.2.10.7.1 Table 8-117
+ */
+struct cxl_mbox_maintenance_hdr {
+ u8 op_class;
+ u8 op_subclass;
+} __packed;
+
+static int cxl_perform_maintenance(struct cxl_mailbox *cxl_mbox, u8 class,
+ u8 subclass, void *data_in,
+ size_t data_in_size)
+{
+ struct cxl_memdev_maintenance_pi {
+ struct cxl_mbox_maintenance_hdr hdr;
+ u8 data[];
+ } __packed;
+ struct cxl_mbox_cmd mbox_cmd;
+ size_t hdr_size;
+
+ struct cxl_memdev_maintenance_pi *pi __free(kvfree) =
+ kvzalloc(cxl_mbox->payload_size, GFP_KERNEL);
+ if (!pi)
+ return -ENOMEM;
+
+ pi->hdr.op_class = class;
+ pi->hdr.op_subclass = subclass;
+ hdr_size = sizeof(pi->hdr);
+ /*
+	 * Check that the mbox payload is large enough to hold the
+	 * maintenance header and data.
+ */
+ if (hdr_size + data_in_size > cxl_mbox->payload_size)
+ return -ENOMEM;
+
+ memcpy(pi->data, data_in, data_in_size);
+ mbox_cmd = (struct cxl_mbox_cmd){
+ .opcode = CXL_MBOX_OP_DO_MAINTENANCE,
+ .size_in = hdr_size + data_in_size,
+ .payload_in = pi,
+ };
+
+ return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
+}
+
+/*
+ * Support for finding whether a memory operation's attributes
+ * are from the current boot or not.
+ */
+
+struct cxl_mem_err_rec {
+ struct xarray rec_gen_media;
+ struct xarray rec_dram;
+};
+
+enum cxl_mem_repair_type {
+ CXL_PPR,
+ CXL_CACHELINE_SPARING,
+ CXL_ROW_SPARING,
+ CXL_BANK_SPARING,
+ CXL_RANK_SPARING,
+ CXL_REPAIR_MAX,
+};
+
+/**
+ * struct cxl_mem_repair_attrbs - CXL memory repair attributes
+ * @dpa: DPA of memory to repair
+ * @nibble_mask: nibble mask, identifies one or more nibbles on the memory bus
+ * @row: row of memory to repair
+ * @column: column of memory to repair
+ * @channel: channel of memory to repair
+ * @sub_channel: sub channel of memory to repair
+ * @rank: rank of memory to repair
+ * @bank_group: bank group of memory to repair
+ * @bank: bank of memory to repair
+ * @repair_type: repair type, e.g. PPR or memory sparing.
+ */
+struct cxl_mem_repair_attrbs {
+ u64 dpa;
+ u32 nibble_mask;
+ u32 row;
+ u16 column;
+ u8 channel;
+ u8 sub_channel;
+ u8 rank;
+ u8 bank_group;
+ u8 bank;
+ enum cxl_mem_repair_type repair_type;
+};
+
+static struct cxl_event_gen_media *
+cxl_find_rec_gen_media(struct cxl_memdev *cxlmd,
+ struct cxl_mem_repair_attrbs *attrbs)
+{
+ struct cxl_mem_err_rec *array_rec = cxlmd->err_rec_array;
+ struct cxl_event_gen_media *rec;
+
+ if (!array_rec)
+ return NULL;
+
+ rec = xa_load(&array_rec->rec_gen_media, attrbs->dpa);
+ if (!rec)
+ return NULL;
+
+ if (attrbs->repair_type == CXL_PPR)
+ return rec;
+
+ return NULL;
+}
+
+static struct cxl_event_dram *
+cxl_find_rec_dram(struct cxl_memdev *cxlmd,
+ struct cxl_mem_repair_attrbs *attrbs)
+{
+ struct cxl_mem_err_rec *array_rec = cxlmd->err_rec_array;
+ struct cxl_event_dram *rec;
+ u16 validity_flags;
+
+ if (!array_rec)
+ return NULL;
+
+ rec = xa_load(&array_rec->rec_dram, attrbs->dpa);
+ if (!rec)
+ return NULL;
+
+ validity_flags = get_unaligned_le16(rec->media_hdr.validity_flags);
+ if (!(validity_flags & CXL_DER_VALID_CHANNEL) ||
+ !(validity_flags & CXL_DER_VALID_RANK))
+ return NULL;
+
+ switch (attrbs->repair_type) {
+ case CXL_PPR:
+ if (!(validity_flags & CXL_DER_VALID_NIBBLE) ||
+ get_unaligned_le24(rec->nibble_mask) == attrbs->nibble_mask)
+ return rec;
+ break;
+ case CXL_CACHELINE_SPARING:
+ if (!(validity_flags & CXL_DER_VALID_BANK_GROUP) ||
+ !(validity_flags & CXL_DER_VALID_BANK) ||
+ !(validity_flags & CXL_DER_VALID_ROW) ||
+ !(validity_flags & CXL_DER_VALID_COLUMN))
+ return NULL;
+
+ if (rec->media_hdr.channel == attrbs->channel &&
+ rec->media_hdr.rank == attrbs->rank &&
+ rec->bank_group == attrbs->bank_group &&
+ rec->bank == attrbs->bank &&
+ get_unaligned_le24(rec->row) == attrbs->row &&
+ get_unaligned_le16(rec->column) == attrbs->column &&
+ (!(validity_flags & CXL_DER_VALID_NIBBLE) ||
+ get_unaligned_le24(rec->nibble_mask) ==
+ attrbs->nibble_mask) &&
+ (!(validity_flags & CXL_DER_VALID_SUB_CHANNEL) ||
+ rec->sub_channel == attrbs->sub_channel))
+ return rec;
+ break;
+ case CXL_ROW_SPARING:
+ if (!(validity_flags & CXL_DER_VALID_BANK_GROUP) ||
+ !(validity_flags & CXL_DER_VALID_BANK) ||
+ !(validity_flags & CXL_DER_VALID_ROW))
+ return NULL;
+
+ if (rec->media_hdr.channel == attrbs->channel &&
+ rec->media_hdr.rank == attrbs->rank &&
+ rec->bank_group == attrbs->bank_group &&
+ rec->bank == attrbs->bank &&
+ get_unaligned_le24(rec->row) == attrbs->row &&
+ (!(validity_flags & CXL_DER_VALID_NIBBLE) ||
+ get_unaligned_le24(rec->nibble_mask) ==
+ attrbs->nibble_mask))
+ return rec;
+ break;
+ case CXL_BANK_SPARING:
+ if (!(validity_flags & CXL_DER_VALID_BANK_GROUP) ||
+ !(validity_flags & CXL_DER_VALID_BANK))
+ return NULL;
+
+ if (rec->media_hdr.channel == attrbs->channel &&
+ rec->media_hdr.rank == attrbs->rank &&
+ rec->bank_group == attrbs->bank_group &&
+ rec->bank == attrbs->bank &&
+ (!(validity_flags & CXL_DER_VALID_NIBBLE) ||
+ get_unaligned_le24(rec->nibble_mask) ==
+ attrbs->nibble_mask))
+ return rec;
+ break;
+ case CXL_RANK_SPARING:
+ if (rec->media_hdr.channel == attrbs->channel &&
+ rec->media_hdr.rank == attrbs->rank &&
+ (!(validity_flags & CXL_DER_VALID_NIBBLE) ||
+ get_unaligned_le24(rec->nibble_mask) ==
+ attrbs->nibble_mask))
+ return rec;
+ break;
+ default:
+ return NULL;
+ }
+
+ return NULL;
+}
+
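+/*
+ * Stored event records are only trusted for a bounded window; cap both
+ * the age and the number of records kept per memdev.
+ */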
+#define CXL_MAX_STORAGE_DAYS 10
+#define CXL_MAX_STORAGE_TIME_SECS (CXL_MAX_STORAGE_DAYS * 24 * 60 * 60)
+
+static void cxl_del_expired_gmedia_recs(struct xarray *rec_xarray,
+ struct cxl_event_gen_media *cur_rec)
+{
+ u64 cur_ts = le64_to_cpu(cur_rec->media_hdr.hdr.timestamp);
+ struct cxl_event_gen_media *rec;
+ unsigned long index;
+ u64 delta_ts_secs;
+
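+	/* Event timestamps are in nanoseconds; compare record age in seconds. */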
+ xa_for_each(rec_xarray, index, rec) {
+ delta_ts_secs = (cur_ts -
+ le64_to_cpu(rec->media_hdr.hdr.timestamp)) / 1000000000ULL;
+ if (delta_ts_secs >= CXL_MAX_STORAGE_TIME_SECS) {
+ xa_erase(rec_xarray, index);
+ kfree(rec);
+ }
+ }
+}
+
+static void cxl_del_expired_dram_recs(struct xarray *rec_xarray,
+ struct cxl_event_dram *cur_rec)
+{
+ u64 cur_ts = le64_to_cpu(cur_rec->media_hdr.hdr.timestamp);
+ struct cxl_event_dram *rec;
+ unsigned long index;
+ u64 delta_secs;
+
+ xa_for_each(rec_xarray, index, rec) {
+ delta_secs = (cur_ts -
+ le64_to_cpu(rec->media_hdr.hdr.timestamp)) / 1000000000ULL;
+ if (delta_secs >= CXL_MAX_STORAGE_TIME_SECS) {
+ xa_erase(rec_xarray, index);
+ kfree(rec);
+ }
+ }
+}
+
+#define CXL_MAX_REC_STORAGE_COUNT 200
+
+static void cxl_del_overflow_old_recs(struct xarray *rec_xarray)
+{
+ void *err_rec;
+ unsigned long index, count = 0;
+
+ xa_for_each(rec_xarray, index, err_rec)
+ count++;
+
+ if (count <= CXL_MAX_REC_STORAGE_COUNT)
+ return;
+
+ count -= CXL_MAX_REC_STORAGE_COUNT;
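+	/* Erase in xarray index (DPA) order until back under the cap. */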
+ xa_for_each(rec_xarray, index, err_rec) {
+ xa_erase(rec_xarray, index);
+ kfree(err_rec);
+ count--;
+ if (!count)
+ break;
+ }
+}
+
+int cxl_store_rec_gen_media(struct cxl_memdev *cxlmd, union cxl_event *evt)
+{
+ struct cxl_mem_err_rec *array_rec = cxlmd->err_rec_array;
+ struct cxl_event_gen_media *rec;
+ void *old_rec;
+
+ if (!IS_ENABLED(CONFIG_CXL_EDAC_MEM_REPAIR) || !array_rec)
+ return 0;
+
+ rec = kmemdup(&evt->gen_media, sizeof(*rec), GFP_KERNEL);
+ if (!rec)
+ return -ENOMEM;
+
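+	/* Keyed by DPA: a newer record for the same address replaces the old. */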
+ old_rec = xa_store(&array_rec->rec_gen_media,
+ le64_to_cpu(rec->media_hdr.phys_addr), rec,
+ GFP_KERNEL);
+ if (xa_is_err(old_rec))
+ return xa_err(old_rec);
+
+ kfree(old_rec);
+
+ cxl_del_expired_gmedia_recs(&array_rec->rec_gen_media, rec);
+ cxl_del_overflow_old_recs(&array_rec->rec_gen_media);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_store_rec_gen_media, "CXL");
+
+int cxl_store_rec_dram(struct cxl_memdev *cxlmd, union cxl_event *evt)
+{
+ struct cxl_mem_err_rec *array_rec = cxlmd->err_rec_array;
+ struct cxl_event_dram *rec;
+ void *old_rec;
+
+ if (!IS_ENABLED(CONFIG_CXL_EDAC_MEM_REPAIR) || !array_rec)
+ return 0;
+
+ rec = kmemdup(&evt->dram, sizeof(*rec), GFP_KERNEL);
+ if (!rec)
+ return -ENOMEM;
+
+ old_rec = xa_store(&array_rec->rec_dram,
+ le64_to_cpu(rec->media_hdr.phys_addr), rec,
+ GFP_KERNEL);
+ if (xa_is_err(old_rec))
+ return xa_err(old_rec);
+
+ kfree(old_rec);
+
+ cxl_del_expired_dram_recs(&array_rec->rec_dram, rec);
+ cxl_del_overflow_old_recs(&array_rec->rec_dram);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_store_rec_dram, "CXL");
+
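+/*
+ * Committed decoders on the endpoint are used as the indication that the
+ * memdev's memory is online and potentially in use.
+ */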
+static bool cxl_is_memdev_memory_online(const struct cxl_memdev *cxlmd)
+{
+ struct cxl_port *port = cxlmd->endpoint;
+
+	return port && cxl_num_decoders_committed(port);
+}
+
+/*
+ * CXL memory sparing control
+ */
+enum cxl_mem_sparing_granularity {
+ CXL_MEM_SPARING_CACHELINE,
+ CXL_MEM_SPARING_ROW,
+ CXL_MEM_SPARING_BANK,
+ CXL_MEM_SPARING_RANK,
+ CXL_MEM_SPARING_MAX
+};
+
+struct cxl_mem_sparing_context {
+ struct cxl_memdev *cxlmd;
+ uuid_t repair_uuid;
+ u16 get_feat_size;
+ u16 set_feat_size;
+ u16 effects;
+ u8 instance;
+ u8 get_version;
+ u8 set_version;
+ u8 op_class;
+ u8 op_subclass;
+ bool cap_safe_when_in_use;
+ bool cap_hard_sparing;
+ bool cap_soft_sparing;
+ u8 channel;
+ u8 rank;
+ u8 bank_group;
+ u32 nibble_mask;
+ u64 dpa;
+ u32 row;
+ u16 column;
+ u8 bank;
+ u8 sub_channel;
+ enum edac_mem_repair_type repair_type;
+ bool persist_mode;
+};
+
+#define CXL_SPARING_RD_CAP_SAFE_IN_USE_MASK BIT(0)
+#define CXL_SPARING_RD_CAP_HARD_SPARING_MASK BIT(1)
+#define CXL_SPARING_RD_CAP_SOFT_SPARING_MASK BIT(2)
+
+#define CXL_SPARING_WR_DEVICE_INITIATED_MASK BIT(0)
+
+#define CXL_SPARING_QUERY_RESOURCE_FLAG BIT(0)
+#define CXL_SET_HARD_SPARING_FLAG BIT(1)
+#define CXL_SPARING_SUB_CHNL_VALID_FLAG BIT(2)
+#define CXL_SPARING_NIB_MASK_VALID_FLAG BIT(3)
+
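+/*
+ * Restriction flag semantics: a set bit means the operation is unsafe
+ * while memory is in use, hence the inversion (^ 1) for SAFE_IN_USE.
+ */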
+#define CXL_GET_SPARING_SAFE_IN_USE(flags) \
+ (FIELD_GET(CXL_SPARING_RD_CAP_SAFE_IN_USE_MASK, \
+ flags) ^ 1)
+#define CXL_GET_CAP_HARD_SPARING(flags) \
+ FIELD_GET(CXL_SPARING_RD_CAP_HARD_SPARING_MASK, \
+ flags)
+#define CXL_GET_CAP_SOFT_SPARING(flags) \
+ FIELD_GET(CXL_SPARING_RD_CAP_SOFT_SPARING_MASK, \
+ flags)
+
+#define CXL_SET_SPARING_QUERY_RESOURCE(val) \
+ FIELD_PREP(CXL_SPARING_QUERY_RESOURCE_FLAG, val)
+#define CXL_SET_HARD_SPARING(val) \
+ FIELD_PREP(CXL_SET_HARD_SPARING_FLAG, val)
+#define CXL_SET_SPARING_SUB_CHNL_VALID(val) \
+ FIELD_PREP(CXL_SPARING_SUB_CHNL_VALID_FLAG, val)
+#define CXL_SET_SPARING_NIB_MASK_VALID(val) \
+ FIELD_PREP(CXL_SPARING_NIB_MASK_VALID_FLAG, val)
+
+/*
+ * See CXL spec rev 3.2 @8.2.10.7.2.3 Table 8-134 Memory Sparing Feature
+ * Readable Attributes.
+ */
+struct cxl_memdev_repair_rd_attrbs_hdr {
+ u8 max_op_latency;
+ __le16 op_cap;
+ __le16 op_mode;
+ u8 op_class;
+ u8 op_subclass;
+ u8 rsvd[9];
+} __packed;
+
+struct cxl_memdev_sparing_rd_attrbs {
+ struct cxl_memdev_repair_rd_attrbs_hdr hdr;
+ u8 rsvd;
+ __le16 restriction_flags;
+} __packed;
+
+/*
+ * See CXL spec rev 3.2 @8.2.10.7.1.4 Table 8-120 Memory Sparing Input Payload.
+ */
+struct cxl_memdev_sparing_in_payload {
+ u8 flags;
+ u8 channel;
+ u8 rank;
+ u8 nibble_mask[3];
+ u8 bank_group;
+ u8 bank;
+ u8 row[3];
+ __le16 column;
+ u8 sub_channel;
+} __packed;
+
+static int
+cxl_mem_sparing_get_attrbs(struct cxl_mem_sparing_context *cxl_sparing_ctx)
+{
+ size_t rd_data_size = sizeof(struct cxl_memdev_sparing_rd_attrbs);
+ struct cxl_memdev *cxlmd = cxl_sparing_ctx->cxlmd;
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
+ u16 restriction_flags;
+ size_t data_size;
+ u16 return_code;
+ struct cxl_memdev_sparing_rd_attrbs *rd_attrbs __free(kfree) =
+ kzalloc(rd_data_size, GFP_KERNEL);
+ if (!rd_attrbs)
+ return -ENOMEM;
+
+ data_size = cxl_get_feature(cxl_mbox, &cxl_sparing_ctx->repair_uuid,
+ CXL_GET_FEAT_SEL_CURRENT_VALUE, rd_attrbs,
+ rd_data_size, 0, &return_code);
+ if (!data_size)
+ return -EIO;
+
+ cxl_sparing_ctx->op_class = rd_attrbs->hdr.op_class;
+ cxl_sparing_ctx->op_subclass = rd_attrbs->hdr.op_subclass;
+ restriction_flags = le16_to_cpu(rd_attrbs->restriction_flags);
+ cxl_sparing_ctx->cap_safe_when_in_use =
+ CXL_GET_SPARING_SAFE_IN_USE(restriction_flags);
+ cxl_sparing_ctx->cap_hard_sparing =
+ CXL_GET_CAP_HARD_SPARING(restriction_flags);
+ cxl_sparing_ctx->cap_soft_sparing =
+ CXL_GET_CAP_SOFT_SPARING(restriction_flags);
+
+ return 0;
+}
+
+static struct cxl_event_dram *
+cxl_mem_get_rec_dram(struct cxl_memdev *cxlmd,
+ struct cxl_mem_sparing_context *ctx)
+{
+ struct cxl_mem_repair_attrbs attrbs = { 0 };
+
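+	/* Translate the EDAC repair controls into record-matching attributes. */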
+ attrbs.dpa = ctx->dpa;
+ attrbs.channel = ctx->channel;
+ attrbs.rank = ctx->rank;
+ attrbs.nibble_mask = ctx->nibble_mask;
+ switch (ctx->repair_type) {
+ case EDAC_REPAIR_CACHELINE_SPARING:
+ attrbs.repair_type = CXL_CACHELINE_SPARING;
+ attrbs.bank_group = ctx->bank_group;
+ attrbs.bank = ctx->bank;
+ attrbs.row = ctx->row;
+ attrbs.column = ctx->column;
+ attrbs.sub_channel = ctx->sub_channel;
+ break;
+ case EDAC_REPAIR_ROW_SPARING:
+ attrbs.repair_type = CXL_ROW_SPARING;
+ attrbs.bank_group = ctx->bank_group;
+ attrbs.bank = ctx->bank;
+ attrbs.row = ctx->row;
+ break;
+ case EDAC_REPAIR_BANK_SPARING:
+ attrbs.repair_type = CXL_BANK_SPARING;
+ attrbs.bank_group = ctx->bank_group;
+ attrbs.bank = ctx->bank;
+ break;
+ case EDAC_REPAIR_RANK_SPARING:
+		attrbs.repair_type = CXL_RANK_SPARING;
+ break;
+ default:
+ return NULL;
+ }
+
+ return cxl_find_rec_dram(cxlmd, &attrbs);
+}
+
+static int
+cxl_mem_perform_sparing(struct device *dev,
+ struct cxl_mem_sparing_context *cxl_sparing_ctx)
+{
+ struct cxl_memdev *cxlmd = cxl_sparing_ctx->cxlmd;
+ struct cxl_memdev_sparing_in_payload sparing_pi;
+ struct cxl_event_dram *rec = NULL;
+ u16 validity_flags = 0;
+
+ struct rw_semaphore *region_lock __free(rwsem_read_release) =
+ rwsem_read_intr_acquire(&cxl_region_rwsem);
+ if (!region_lock)
+ return -EINTR;
+
+ struct rw_semaphore *dpa_lock __free(rwsem_read_release) =
+ rwsem_read_intr_acquire(&cxl_dpa_rwsem);
+ if (!dpa_lock)
+ return -EINTR;
+
+ if (!cxl_sparing_ctx->cap_safe_when_in_use) {
+ /* Memory to repair must be offline */
+ if (cxl_is_memdev_memory_online(cxlmd))
+ return -EBUSY;
+ } else {
+ if (cxl_is_memdev_memory_online(cxlmd)) {
+ rec = cxl_mem_get_rec_dram(cxlmd, cxl_sparing_ctx);
+ if (!rec)
+ return -EINVAL;
+
+ if (!get_unaligned_le16(rec->media_hdr.validity_flags))
+ return -EINVAL;
+ }
+ }
+
+ memset(&sparing_pi, 0, sizeof(sparing_pi));
+ sparing_pi.flags = CXL_SET_SPARING_QUERY_RESOURCE(0);
+ if (cxl_sparing_ctx->persist_mode)
+ sparing_pi.flags |= CXL_SET_HARD_SPARING(1);
+
+ if (rec)
+ validity_flags = get_unaligned_le16(rec->media_hdr.validity_flags);
+
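+	/*
+	 * The cases below intentionally fall through: each finer sparing
+	 * granularity must also populate the fields required by the coarser
+	 * ones.
+	 */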
+ switch (cxl_sparing_ctx->repair_type) {
+ case EDAC_REPAIR_CACHELINE_SPARING:
+ sparing_pi.column = cpu_to_le16(cxl_sparing_ctx->column);
+ if (!rec || (validity_flags & CXL_DER_VALID_SUB_CHANNEL)) {
+ sparing_pi.flags |= CXL_SET_SPARING_SUB_CHNL_VALID(1);
+ sparing_pi.sub_channel = cxl_sparing_ctx->sub_channel;
+ }
+ fallthrough;
+ case EDAC_REPAIR_ROW_SPARING:
+ put_unaligned_le24(cxl_sparing_ctx->row, sparing_pi.row);
+ fallthrough;
+ case EDAC_REPAIR_BANK_SPARING:
+ sparing_pi.bank_group = cxl_sparing_ctx->bank_group;
+ sparing_pi.bank = cxl_sparing_ctx->bank;
+ fallthrough;
+ case EDAC_REPAIR_RANK_SPARING:
+ sparing_pi.rank = cxl_sparing_ctx->rank;
+ fallthrough;
+ default:
+ sparing_pi.channel = cxl_sparing_ctx->channel;
+ if ((rec && (validity_flags & CXL_DER_VALID_NIBBLE)) ||
+ (!rec && (!cxl_sparing_ctx->nibble_mask ||
+ (cxl_sparing_ctx->nibble_mask & 0xFFFFFF)))) {
+ sparing_pi.flags |= CXL_SET_SPARING_NIB_MASK_VALID(1);
+ put_unaligned_le24(cxl_sparing_ctx->nibble_mask,
+ sparing_pi.nibble_mask);
+ }
+ break;
+ }
+
+ return cxl_perform_maintenance(&cxlmd->cxlds->cxl_mbox,
+ cxl_sparing_ctx->op_class,
+ cxl_sparing_ctx->op_subclass,
+ &sparing_pi, sizeof(sparing_pi));
+}
+
+static int cxl_mem_sparing_get_repair_type(struct device *dev, void *drv_data,
+ const char **repair_type)
+{
+ struct cxl_mem_sparing_context *ctx = drv_data;
+
+ switch (ctx->repair_type) {
+ case EDAC_REPAIR_CACHELINE_SPARING:
+ case EDAC_REPAIR_ROW_SPARING:
+ case EDAC_REPAIR_BANK_SPARING:
+ case EDAC_REPAIR_RANK_SPARING:
+ *repair_type = edac_repair_type[ctx->repair_type];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define CXL_SPARING_GET_ATTR(attrb, data_type) \
+ static int cxl_mem_sparing_get_##attrb( \
+ struct device *dev, void *drv_data, data_type *val) \
+ { \
+ struct cxl_mem_sparing_context *ctx = drv_data; \
+ \
+ *val = ctx->attrb; \
+ \
+ return 0; \
+ }
+CXL_SPARING_GET_ATTR(persist_mode, bool)
+CXL_SPARING_GET_ATTR(dpa, u64)
+CXL_SPARING_GET_ATTR(nibble_mask, u32)
+CXL_SPARING_GET_ATTR(bank_group, u32)
+CXL_SPARING_GET_ATTR(bank, u32)
+CXL_SPARING_GET_ATTR(rank, u32)
+CXL_SPARING_GET_ATTR(row, u32)
+CXL_SPARING_GET_ATTR(column, u32)
+CXL_SPARING_GET_ATTR(channel, u32)
+CXL_SPARING_GET_ATTR(sub_channel, u32)
+
+#define CXL_SPARING_SET_ATTR(attrb, data_type) \
+ static int cxl_mem_sparing_set_##attrb(struct device *dev, \
+ void *drv_data, data_type val) \
+ { \
+ struct cxl_mem_sparing_context *ctx = drv_data; \
+ \
+ ctx->attrb = val; \
+ \
+ return 0; \
+ }
+CXL_SPARING_SET_ATTR(nibble_mask, u32)
+CXL_SPARING_SET_ATTR(bank_group, u32)
+CXL_SPARING_SET_ATTR(bank, u32)
+CXL_SPARING_SET_ATTR(rank, u32)
+CXL_SPARING_SET_ATTR(row, u32)
+CXL_SPARING_SET_ATTR(column, u32)
+CXL_SPARING_SET_ATTR(channel, u32)
+CXL_SPARING_SET_ATTR(sub_channel, u32)
+
+static int cxl_mem_sparing_set_persist_mode(struct device *dev, void *drv_data,
+ bool persist_mode)
+{
+ struct cxl_mem_sparing_context *ctx = drv_data;
+
+ if ((persist_mode && ctx->cap_hard_sparing) ||
+ (!persist_mode && ctx->cap_soft_sparing))
+ ctx->persist_mode = persist_mode;
+ else
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int cxl_get_mem_sparing_safe_when_in_use(struct device *dev,
+ void *drv_data, bool *safe)
+{
+ struct cxl_mem_sparing_context *ctx = drv_data;
+
+ *safe = ctx->cap_safe_when_in_use;
+
+ return 0;
+}
+
+static int cxl_mem_sparing_get_min_dpa(struct device *dev, void *drv_data,
+ u64 *min_dpa)
+{
+ struct cxl_mem_sparing_context *ctx = drv_data;
+ struct cxl_memdev *cxlmd = ctx->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+ *min_dpa = cxlds->dpa_res.start;
+
+ return 0;
+}
+
+static int cxl_mem_sparing_get_max_dpa(struct device *dev, void *drv_data,
+ u64 *max_dpa)
+{
+ struct cxl_mem_sparing_context *ctx = drv_data;
+ struct cxl_memdev *cxlmd = ctx->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+ *max_dpa = cxlds->dpa_res.end;
+
+ return 0;
+}
+
+static int cxl_mem_sparing_set_dpa(struct device *dev, void *drv_data, u64 dpa)
+{
+ struct cxl_mem_sparing_context *ctx = drv_data;
+ struct cxl_memdev *cxlmd = ctx->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+ if (dpa < cxlds->dpa_res.start || dpa > cxlds->dpa_res.end)
+ return -EINVAL;
+
+ ctx->dpa = dpa;
+
+ return 0;
+}
+
+static int cxl_do_mem_sparing(struct device *dev, void *drv_data, u32 val)
+{
+ struct cxl_mem_sparing_context *ctx = drv_data;
+
+ if (val != EDAC_DO_MEM_REPAIR)
+ return -EINVAL;
+
+ return cxl_mem_perform_sparing(dev, ctx);
+}
+
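+/*
+ * Each granularity's ops extend the previous set:
+ * rank < bank < row < cacheline.
+ */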
+#define RANK_OPS \
+ .get_repair_type = cxl_mem_sparing_get_repair_type, \
+ .get_persist_mode = cxl_mem_sparing_get_persist_mode, \
+ .set_persist_mode = cxl_mem_sparing_set_persist_mode, \
+ .get_repair_safe_when_in_use = cxl_get_mem_sparing_safe_when_in_use, \
+ .get_min_dpa = cxl_mem_sparing_get_min_dpa, \
+ .get_max_dpa = cxl_mem_sparing_get_max_dpa, \
+ .get_dpa = cxl_mem_sparing_get_dpa, \
+ .set_dpa = cxl_mem_sparing_set_dpa, \
+ .get_nibble_mask = cxl_mem_sparing_get_nibble_mask, \
+ .set_nibble_mask = cxl_mem_sparing_set_nibble_mask, \
+ .get_rank = cxl_mem_sparing_get_rank, \
+ .set_rank = cxl_mem_sparing_set_rank, \
+ .get_channel = cxl_mem_sparing_get_channel, \
+ .set_channel = cxl_mem_sparing_set_channel, \
+ .do_repair = cxl_do_mem_sparing
+
+#define BANK_OPS \
+ RANK_OPS, .get_bank_group = cxl_mem_sparing_get_bank_group, \
+ .set_bank_group = cxl_mem_sparing_set_bank_group, \
+ .get_bank = cxl_mem_sparing_get_bank, \
+ .set_bank = cxl_mem_sparing_set_bank
+
+#define ROW_OPS \
+ BANK_OPS, .get_row = cxl_mem_sparing_get_row, \
+ .set_row = cxl_mem_sparing_set_row
+
+#define CACHELINE_OPS \
+ ROW_OPS, .get_column = cxl_mem_sparing_get_column, \
+ .set_column = cxl_mem_sparing_set_column, \
+ .get_sub_channel = cxl_mem_sparing_get_sub_channel, \
+ .set_sub_channel = cxl_mem_sparing_set_sub_channel
+
+static const struct edac_mem_repair_ops cxl_rank_sparing_ops = {
+ RANK_OPS,
+};
+
+static const struct edac_mem_repair_ops cxl_bank_sparing_ops = {
+ BANK_OPS,
+};
+
+static const struct edac_mem_repair_ops cxl_row_sparing_ops = {
+ ROW_OPS,
+};
+
+static const struct edac_mem_repair_ops cxl_cacheline_sparing_ops = {
+ CACHELINE_OPS,
+};
+
+struct cxl_mem_sparing_desc {
+ const uuid_t repair_uuid;
+ enum edac_mem_repair_type repair_type;
+ const struct edac_mem_repair_ops *repair_ops;
+};
+
+static const struct cxl_mem_sparing_desc mem_sparing_desc[] = {
+ {
+ .repair_uuid = CXL_FEAT_CACHELINE_SPARING_UUID,
+ .repair_type = EDAC_REPAIR_CACHELINE_SPARING,
+ .repair_ops = &cxl_cacheline_sparing_ops,
+ },
+ {
+ .repair_uuid = CXL_FEAT_ROW_SPARING_UUID,
+ .repair_type = EDAC_REPAIR_ROW_SPARING,
+ .repair_ops = &cxl_row_sparing_ops,
+ },
+ {
+ .repair_uuid = CXL_FEAT_BANK_SPARING_UUID,
+ .repair_type = EDAC_REPAIR_BANK_SPARING,
+ .repair_ops = &cxl_bank_sparing_ops,
+ },
+ {
+ .repair_uuid = CXL_FEAT_RANK_SPARING_UUID,
+ .repair_type = EDAC_REPAIR_RANK_SPARING,
+ .repair_ops = &cxl_rank_sparing_ops,
+ },
+};
+
+static int cxl_memdev_sparing_init(struct cxl_memdev *cxlmd,
+ struct edac_dev_feature *ras_feature,
+ const struct cxl_mem_sparing_desc *desc,
+ u8 repair_inst)
+{
+ struct cxl_mem_sparing_context *cxl_sparing_ctx;
+ struct cxl_feat_entry *feat_entry;
+ int ret;
+
+ feat_entry = cxl_feature_info(to_cxlfs(cxlmd->cxlds),
+ &desc->repair_uuid);
+ if (IS_ERR(feat_entry))
+ return -EOPNOTSUPP;
+
+ if (!(le32_to_cpu(feat_entry->flags) & CXL_FEATURE_F_CHANGEABLE))
+ return -EOPNOTSUPP;
+
+ cxl_sparing_ctx = devm_kzalloc(&cxlmd->dev, sizeof(*cxl_sparing_ctx),
+ GFP_KERNEL);
+ if (!cxl_sparing_ctx)
+ return -ENOMEM;
+
+ *cxl_sparing_ctx = (struct cxl_mem_sparing_context){
+ .get_feat_size = le16_to_cpu(feat_entry->get_feat_size),
+ .set_feat_size = le16_to_cpu(feat_entry->set_feat_size),
+ .get_version = feat_entry->get_feat_ver,
+ .set_version = feat_entry->set_feat_ver,
+ .effects = le16_to_cpu(feat_entry->effects),
+ .cxlmd = cxlmd,
+ .repair_type = desc->repair_type,
+		.instance = repair_inst,
+ };
+ uuid_copy(&cxl_sparing_ctx->repair_uuid, &desc->repair_uuid);
+
+ ret = cxl_mem_sparing_get_attrbs(cxl_sparing_ctx);
+ if (ret)
+ return ret;
+
+	/* Default to soft (non-persistent) sparing whenever it is supported. */
+	if (cxl_sparing_ctx->cap_soft_sparing)
+		cxl_sparing_ctx->persist_mode = 0;
+ else if (cxl_sparing_ctx->cap_hard_sparing)
+ cxl_sparing_ctx->persist_mode = 1;
+ else
+ return -EOPNOTSUPP;
+
+ ras_feature->ft_type = RAS_FEAT_MEM_REPAIR;
+ ras_feature->instance = cxl_sparing_ctx->instance;
+ ras_feature->mem_repair_ops = desc->repair_ops;
+ ras_feature->ctx = cxl_sparing_ctx;
+
+ return 0;
+}
+
+/*
+ * CXL memory soft PPR & hard PPR control
+ */
+struct cxl_ppr_context {
+ uuid_t repair_uuid;
+ u8 instance;
+ u16 get_feat_size;
+ u16 set_feat_size;
+ u8 get_version;
+ u8 set_version;
+ u16 effects;
+ u8 op_class;
+ u8 op_subclass;
+ bool cap_dpa;
+ bool cap_nib_mask;
+ bool media_accessible;
+ bool data_retained;
+ struct cxl_memdev *cxlmd;
+ enum edac_mem_repair_type repair_type;
+ bool persist_mode;
+ u64 dpa;
+ u32 nibble_mask;
+};
+
+/*
+ * See CXL rev 3.2 @8.2.10.7.2.1 Table 8-128 sPPR Feature Readable Attributes
+ *
+ * See CXL rev 3.2 @8.2.10.7.2.2 Table 8-131 hPPR Feature Readable Attributes
+ */
+
+#define CXL_PPR_OP_CAP_DEVICE_INITIATED BIT(0)
+#define CXL_PPR_OP_MODE_DEV_INITIATED BIT(0)
+
+#define CXL_PPR_FLAG_DPA_SUPPORT_MASK BIT(0)
+#define CXL_PPR_FLAG_NIB_SUPPORT_MASK BIT(1)
+#define CXL_PPR_FLAG_MEM_SPARING_EV_REC_SUPPORT_MASK BIT(2)
+#define CXL_PPR_FLAG_DEV_INITED_PPR_AT_BOOT_CAP_MASK BIT(3)
+
+#define CXL_PPR_RESTRICTION_FLAG_MEDIA_ACCESSIBLE_MASK BIT(0)
+#define CXL_PPR_RESTRICTION_FLAG_DATA_RETAINED_MASK BIT(2)
+
+#define CXL_PPR_SPARING_EV_REC_EN_MASK BIT(0)
+#define CXL_PPR_DEV_INITED_PPR_AT_BOOT_EN_MASK BIT(1)
+
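+/*
+ * As with sparing, the restriction flags are inverted (^ 1): a set bit
+ * means media is not accessible or data is not retained across the
+ * operation, so the getters report the capability sense instead.
+ */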
+#define CXL_PPR_GET_CAP_DPA(flags) \
+ FIELD_GET(CXL_PPR_FLAG_DPA_SUPPORT_MASK, flags)
+#define CXL_PPR_GET_CAP_NIB_MASK(flags) \
+ FIELD_GET(CXL_PPR_FLAG_NIB_SUPPORT_MASK, flags)
+#define CXL_PPR_GET_MEDIA_ACCESSIBLE(restriction_flags) \
+ (FIELD_GET(CXL_PPR_RESTRICTION_FLAG_MEDIA_ACCESSIBLE_MASK, \
+ restriction_flags) ^ 1)
+#define CXL_PPR_GET_DATA_RETAINED(restriction_flags) \
+ (FIELD_GET(CXL_PPR_RESTRICTION_FLAG_DATA_RETAINED_MASK, \
+ restriction_flags) ^ 1)
+
+struct cxl_memdev_ppr_rd_attrbs {
+ struct cxl_memdev_repair_rd_attrbs_hdr hdr;
+ u8 ppr_flags;
+ __le16 restriction_flags;
+ u8 ppr_op_mode;
+} __packed;
+
+/*
+ * See CXL rev 3.2 @8.2.10.7.1.2 Table 8-118 sPPR Maintenance Input Payload
+ *
+ * See CXL rev 3.2 @8.2.10.7.1.3 Table 8-119 hPPR Maintenance Input Payload
+ */
+struct cxl_memdev_ppr_maintenance_attrbs {
+ u8 flags;
+ __le64 dpa;
+ u8 nibble_mask[3];
+} __packed;
+
+static int cxl_mem_ppr_get_attrbs(struct cxl_ppr_context *cxl_ppr_ctx)
+{
+ size_t rd_data_size = sizeof(struct cxl_memdev_ppr_rd_attrbs);
+ struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
+ u16 restriction_flags;
+ size_t data_size;
+ u16 return_code;
+
+ struct cxl_memdev_ppr_rd_attrbs *rd_attrbs __free(kfree) =
+ kmalloc(rd_data_size, GFP_KERNEL);
+ if (!rd_attrbs)
+ return -ENOMEM;
+
+ data_size = cxl_get_feature(cxl_mbox, &cxl_ppr_ctx->repair_uuid,
+ CXL_GET_FEAT_SEL_CURRENT_VALUE, rd_attrbs,
+ rd_data_size, 0, &return_code);
+ if (!data_size)
+ return -EIO;
+
+ cxl_ppr_ctx->op_class = rd_attrbs->hdr.op_class;
+ cxl_ppr_ctx->op_subclass = rd_attrbs->hdr.op_subclass;
+ cxl_ppr_ctx->cap_dpa = CXL_PPR_GET_CAP_DPA(rd_attrbs->ppr_flags);
+ cxl_ppr_ctx->cap_nib_mask =
+ CXL_PPR_GET_CAP_NIB_MASK(rd_attrbs->ppr_flags);
+
+ restriction_flags = le16_to_cpu(rd_attrbs->restriction_flags);
+ cxl_ppr_ctx->media_accessible =
+ CXL_PPR_GET_MEDIA_ACCESSIBLE(restriction_flags);
+ cxl_ppr_ctx->data_retained =
+ CXL_PPR_GET_DATA_RETAINED(restriction_flags);
+
+ return 0;
+}
+
+static int cxl_mem_perform_ppr(struct cxl_ppr_context *cxl_ppr_ctx)
+{
+ struct cxl_memdev_ppr_maintenance_attrbs maintenance_attrbs;
+ struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
+ struct cxl_mem_repair_attrbs attrbs = { 0 };
+
+ struct rw_semaphore *region_lock __free(rwsem_read_release) =
+ rwsem_read_intr_acquire(&cxl_region_rwsem);
+ if (!region_lock)
+ return -EINTR;
+
+ struct rw_semaphore *dpa_lock __free(rwsem_read_release) =
+ rwsem_read_intr_acquire(&cxl_dpa_rwsem);
+ if (!dpa_lock)
+ return -EINTR;
+
+ if (!cxl_ppr_ctx->media_accessible || !cxl_ppr_ctx->data_retained) {
+ /* Memory to repair must be offline */
+ if (cxl_is_memdev_memory_online(cxlmd))
+ return -EBUSY;
+ } else {
+ if (cxl_is_memdev_memory_online(cxlmd)) {
+ /* Check memory to repair is from the current boot */
+ attrbs.repair_type = CXL_PPR;
+ attrbs.dpa = cxl_ppr_ctx->dpa;
+ attrbs.nibble_mask = cxl_ppr_ctx->nibble_mask;
+ if (!cxl_find_rec_dram(cxlmd, &attrbs) &&
+ !cxl_find_rec_gen_media(cxlmd, &attrbs))
+ return -EINVAL;
+ }
+ }
+
+ memset(&maintenance_attrbs, 0, sizeof(maintenance_attrbs));
+ maintenance_attrbs.flags = 0;
+ maintenance_attrbs.dpa = cpu_to_le64(cxl_ppr_ctx->dpa);
+ put_unaligned_le24(cxl_ppr_ctx->nibble_mask,
+ maintenance_attrbs.nibble_mask);
+
+ return cxl_perform_maintenance(&cxlmd->cxlds->cxl_mbox,
+ cxl_ppr_ctx->op_class,
+ cxl_ppr_ctx->op_subclass,
+ &maintenance_attrbs,
+ sizeof(maintenance_attrbs));
+}
+
+static int cxl_ppr_get_repair_type(struct device *dev, void *drv_data,
+ const char **repair_type)
+{
+ *repair_type = edac_repair_type[EDAC_REPAIR_PPR];
+
+ return 0;
+}
+
+static int cxl_ppr_get_persist_mode(struct device *dev, void *drv_data,
+ bool *persist_mode)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+
+ *persist_mode = cxl_ppr_ctx->persist_mode;
+
+ return 0;
+}
+
+static int cxl_get_ppr_safe_when_in_use(struct device *dev, void *drv_data,
+ bool *safe)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+
+	*safe = cxl_ppr_ctx->media_accessible && cxl_ppr_ctx->data_retained;
+
+ return 0;
+}
+
+static int cxl_ppr_get_min_dpa(struct device *dev, void *drv_data, u64 *min_dpa)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+ struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+ *min_dpa = cxlds->dpa_res.start;
+
+ return 0;
+}
+
+static int cxl_ppr_get_max_dpa(struct device *dev, void *drv_data, u64 *max_dpa)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+ struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+ *max_dpa = cxlds->dpa_res.end;
+
+ return 0;
+}
+
+static int cxl_ppr_get_dpa(struct device *dev, void *drv_data, u64 *dpa)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+
+ *dpa = cxl_ppr_ctx->dpa;
+
+ return 0;
+}
+
+static int cxl_ppr_set_dpa(struct device *dev, void *drv_data, u64 dpa)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+ struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+ if (dpa < cxlds->dpa_res.start || dpa > cxlds->dpa_res.end)
+ return -EINVAL;
+
+ cxl_ppr_ctx->dpa = dpa;
+
+ return 0;
+}
+
+static int cxl_ppr_get_nibble_mask(struct device *dev, void *drv_data,
+ u32 *nibble_mask)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+
+ *nibble_mask = cxl_ppr_ctx->nibble_mask;
+
+ return 0;
+}
+
+static int cxl_ppr_set_nibble_mask(struct device *dev, void *drv_data,
+ u32 nibble_mask)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+
+ cxl_ppr_ctx->nibble_mask = nibble_mask;
+
+ return 0;
+}
+
+static int cxl_do_ppr(struct device *dev, void *drv_data, u32 val)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+
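+	/* A target DPA must be set first; 0 is treated as unset. */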
+ if (!cxl_ppr_ctx->dpa || val != EDAC_DO_MEM_REPAIR)
+ return -EINVAL;
+
+ return cxl_mem_perform_ppr(cxl_ppr_ctx);
+}
+
+static const struct edac_mem_repair_ops cxl_sppr_ops = {
+ .get_repair_type = cxl_ppr_get_repair_type,
+ .get_persist_mode = cxl_ppr_get_persist_mode,
+ .get_repair_safe_when_in_use = cxl_get_ppr_safe_when_in_use,
+ .get_min_dpa = cxl_ppr_get_min_dpa,
+ .get_max_dpa = cxl_ppr_get_max_dpa,
+ .get_dpa = cxl_ppr_get_dpa,
+ .set_dpa = cxl_ppr_set_dpa,
+ .get_nibble_mask = cxl_ppr_get_nibble_mask,
+ .set_nibble_mask = cxl_ppr_set_nibble_mask,
+ .do_repair = cxl_do_ppr,
+};
+
+static int cxl_memdev_soft_ppr_init(struct cxl_memdev *cxlmd,
+ struct edac_dev_feature *ras_feature,
+ u8 repair_inst)
+{
+ struct cxl_ppr_context *cxl_sppr_ctx;
+ struct cxl_feat_entry *feat_entry;
+ int ret;
+
+ feat_entry = cxl_feature_info(to_cxlfs(cxlmd->cxlds),
+ &CXL_FEAT_SPPR_UUID);
+ if (IS_ERR(feat_entry))
+ return -EOPNOTSUPP;
+
+ if (!(le32_to_cpu(feat_entry->flags) & CXL_FEATURE_F_CHANGEABLE))
+ return -EOPNOTSUPP;
+
+ cxl_sppr_ctx =
+ devm_kzalloc(&cxlmd->dev, sizeof(*cxl_sppr_ctx), GFP_KERNEL);
+ if (!cxl_sppr_ctx)
+ return -ENOMEM;
+
+ *cxl_sppr_ctx = (struct cxl_ppr_context){
+ .get_feat_size = le16_to_cpu(feat_entry->get_feat_size),
+ .set_feat_size = le16_to_cpu(feat_entry->set_feat_size),
+ .get_version = feat_entry->get_feat_ver,
+ .set_version = feat_entry->set_feat_ver,
+ .effects = le16_to_cpu(feat_entry->effects),
+ .cxlmd = cxlmd,
+ .repair_type = EDAC_REPAIR_PPR,
+ .persist_mode = 0,
+ .instance = repair_inst,
+ };
+ uuid_copy(&cxl_sppr_ctx->repair_uuid, &CXL_FEAT_SPPR_UUID);
+
+ ret = cxl_mem_ppr_get_attrbs(cxl_sppr_ctx);
+ if (ret)
+ return ret;
+
+ ras_feature->ft_type = RAS_FEAT_MEM_REPAIR;
+ ras_feature->instance = cxl_sppr_ctx->instance;
+ ras_feature->mem_repair_ops = &cxl_sppr_ops;
+ ras_feature->ctx = cxl_sppr_ctx;
+
+ return 0;
+}
+
+int devm_cxl_memdev_edac_register(struct cxl_memdev *cxlmd)
+{
+ struct edac_dev_feature ras_features[CXL_NR_EDAC_DEV_FEATURES];
+ int num_ras_features = 0;
+ u8 repair_inst = 0;
+ int rc;
+
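+	/* -EOPNOTSUPP means the device lacks the feature; skip it, don't fail. */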
+ if (IS_ENABLED(CONFIG_CXL_EDAC_SCRUB)) {
+ rc = cxl_memdev_scrub_init(cxlmd, &ras_features[num_ras_features], 0);
+ if (rc < 0 && rc != -EOPNOTSUPP)
+ return rc;
+
+ if (rc != -EOPNOTSUPP)
+ num_ras_features++;
+ }
+
+ if (IS_ENABLED(CONFIG_CXL_EDAC_ECS)) {
+ rc = cxl_memdev_ecs_init(cxlmd, &ras_features[num_ras_features]);
+ if (rc < 0 && rc != -EOPNOTSUPP)
+ return rc;
+
+ if (rc != -EOPNOTSUPP)
+ num_ras_features++;
+ }
+
+ if (IS_ENABLED(CONFIG_CXL_EDAC_MEM_REPAIR)) {
+ for (int i = 0; i < CXL_MEM_SPARING_MAX; i++) {
+ rc = cxl_memdev_sparing_init(cxlmd,
+ &ras_features[num_ras_features],
+ &mem_sparing_desc[i], repair_inst);
+ if (rc == -EOPNOTSUPP)
+ continue;
+ if (rc < 0)
+ return rc;
+
+ repair_inst++;
+ num_ras_features++;
+ }
+
+ rc = cxl_memdev_soft_ppr_init(cxlmd, &ras_features[num_ras_features],
+ repair_inst);
+ if (rc < 0 && rc != -EOPNOTSUPP)
+ return rc;
+
+ if (rc != -EOPNOTSUPP) {
+ repair_inst++;
+ num_ras_features++;
+ }
+
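+		/* Only track error records when at least one repair feature exists. */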
+ if (repair_inst) {
+ struct cxl_mem_err_rec *array_rec =
+ devm_kzalloc(&cxlmd->dev, sizeof(*array_rec),
+ GFP_KERNEL);
+ if (!array_rec)
+ return -ENOMEM;
+
+ xa_init(&array_rec->rec_gen_media);
+ xa_init(&array_rec->rec_dram);
+ cxlmd->err_rec_array = array_rec;
+ }
+ }
+
+ if (!num_ras_features)
+ return -EINVAL;
+
+ char *cxl_dev_name __free(kfree) =
+ kasprintf(GFP_KERNEL, "cxl_%s", dev_name(&cxlmd->dev));
+ if (!cxl_dev_name)
+ return -ENOMEM;
+
+ return edac_dev_register(&cxlmd->dev, cxl_dev_name, NULL,
+ num_ras_features, ras_features);
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_memdev_edac_register, "CXL");
+
+int devm_cxl_region_edac_register(struct cxl_region *cxlr)
+{
+ struct edac_dev_feature ras_features[CXL_NR_EDAC_DEV_FEATURES];
+ int num_ras_features = 0;
+ int rc;
+
+ if (!IS_ENABLED(CONFIG_CXL_EDAC_SCRUB))
+ return 0;
+
+ rc = cxl_region_scrub_init(cxlr, &ras_features[num_ras_features], 0);
+ if (rc < 0)
+ return rc;
+
+ num_ras_features++;
+
+ char *cxl_dev_name __free(kfree) =
+ kasprintf(GFP_KERNEL, "cxl_%s", dev_name(&cxlr->dev));
+ if (!cxl_dev_name)
+ return -ENOMEM;
+
+ return edac_dev_register(&cxlr->dev, cxl_dev_name, NULL,
+ num_ras_features, ras_features);
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_region_edac_register, "CXL");
+
+void devm_cxl_memdev_edac_release(struct cxl_memdev *cxlmd)
+{
+ struct cxl_mem_err_rec *array_rec = cxlmd->err_rec_array;
+ struct cxl_event_gen_media *rec_gen_media;
+ struct cxl_event_dram *rec_dram;
+ unsigned long index;
+
+ if (!IS_ENABLED(CONFIG_CXL_EDAC_MEM_REPAIR) || !array_rec)
+ return;
+
+ xa_for_each(&array_rec->rec_dram, index, rec_dram)
+ kfree(rec_dram);
+ xa_destroy(&array_rec->rec_dram);
+
+ xa_for_each(&array_rec->rec_gen_media, index, rec_gen_media)
+ kfree(rec_gen_media);
+ xa_destroy(&array_rec->rec_gen_media);
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_memdev_edac_release, "CXL");
diff --git a/drivers/cxl/core/features.c b/drivers/cxl/core/features.c
index 1498e2369c37..6f2eae1eb126 100644
--- a/drivers/cxl/core/features.c
+++ b/drivers/cxl/core/features.c
@@ -9,6 +9,16 @@
#include "core.h"
#include "cxlmem.h"
+/**
+ * DOC: cxl features
+ *
+ * CXL Features:
+ * A CXL device that includes a mailbox supports commands that allow
+ * listing, getting, and setting of optionally defined features such
+ * as memory sparing or post package repair. Vendors may define custom
+ * features for the device.
+ */
+
/* All the features below are exclusive to the kernel */
static const uuid_t cxl_exclusive_feats[] = {
CXL_FEAT_PATROL_SCRUB_UUID,
@@ -36,7 +46,7 @@ static bool is_cxl_feature_exclusive(struct cxl_feat_entry *entry)
return is_cxl_feature_exclusive_by_uuid(&entry->uuid);
}
-inline struct cxl_features_state *to_cxlfs(struct cxl_dev_state *cxlds)
+struct cxl_features_state *to_cxlfs(struct cxl_dev_state *cxlds)
{
return cxlds->cxlfs;
}
@@ -355,17 +365,11 @@ static void cxlctl_close_uctx(struct fwctl_uctx *uctx)
{
}
-static struct cxl_feat_entry *
-get_support_feature_info(struct cxl_features_state *cxlfs,
- const struct fwctl_rpc_cxl *rpc_in)
+struct cxl_feat_entry *
+cxl_feature_info(struct cxl_features_state *cxlfs,
+ const uuid_t *uuid)
{
struct cxl_feat_entry *feat;
- const uuid_t *uuid;
-
- if (rpc_in->op_size < sizeof(uuid))
- return ERR_PTR(-EINVAL);
-
- uuid = &rpc_in->set_feat_in.uuid;
for (int i = 0; i < cxlfs->entries->num_features; i++) {
feat = &cxlfs->entries->ent[i];
@@ -416,14 +420,6 @@ static void *cxlctl_get_supported_features(struct cxl_features_state *cxlfs,
rpc_out->size = struct_size(feat_out, ents, requested);
feat_out = &rpc_out->get_sup_feats_out;
- if (requested == 0) {
- feat_out->num_entries = cpu_to_le16(requested);
- feat_out->supported_feats =
- cpu_to_le16(cxlfs->entries->num_features);
- rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;
- *out_len = out_size;
- return no_free_ptr(rpc_out);
- }
for (i = start, pos = &feat_out->ents[0];
i < cxlfs->entries->num_features; i++, pos++) {
@@ -547,7 +543,10 @@ static bool cxlctl_validate_set_features(struct cxl_features_state *cxlfs,
struct cxl_feat_entry *feat;
u32 flags;
- feat = get_support_feature_info(cxlfs, rpc_in);
+ if (rpc_in->op_size < sizeof(uuid_t))
+	return false;
+
+ feat = cxl_feature_info(cxlfs, &rpc_in->set_feat_in.uuid);
if (IS_ERR(feat))
return false;
@@ -614,11 +613,7 @@ static bool cxlctl_validate_hw_command(struct cxl_features_state *cxlfs,
switch (opcode) {
case CXL_MBOX_OP_GET_SUPPORTED_FEATURES:
case CXL_MBOX_OP_GET_FEATURE:
- if (cxl_mbox->feat_cap < CXL_FEATURES_RO)
- return false;
- if (scope >= FWCTL_RPC_CONFIGURATION)
- return true;
- return false;
+ return cxl_mbox->feat_cap >= CXL_FEATURES_RO;
case CXL_MBOX_OP_SET_FEATURE:
if (cxl_mbox->feat_cap < CXL_FEATURES_RW)
return false;
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index 70cae4ebf8a4..ab1007495f6b 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -34,7 +34,8 @@ static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
if (rc)
return rc;
- dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));
+ dev_dbg(port->uport_dev, "%s added to %s\n",
+ dev_name(&cxld->dev), dev_name(&port->dev));
return 0;
}
@@ -603,7 +604,7 @@ int cxl_dpa_set_part(struct cxl_endpoint_decoder *cxled,
return 0;
}
-static int __cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
+static int __cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size)
{
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
@@ -666,15 +667,15 @@ static int __cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long lon
skip = res->start - skip_start;
if (size > avail) {
- dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
- res->name, &avail);
+ dev_dbg(dev, "%llu exceeds available %s capacity: %llu\n", size,
+ res->name, (u64)avail);
return -ENOSPC;
}
return __cxl_dpa_reserve(cxled, start, size, skip);
}
-int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
+int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size)
{
struct cxl_port *port = cxled_to_port(cxled);
int rc;
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index d72764056ce6..2689e6453c5a 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -922,12 +922,19 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
hpa_alias = hpa - cache_size;
}
- if (event_type == CXL_CPER_EVENT_GEN_MEDIA)
+ if (event_type == CXL_CPER_EVENT_GEN_MEDIA) {
+ if (cxl_store_rec_gen_media((struct cxl_memdev *)cxlmd, evt))
+ dev_dbg(&cxlmd->dev, "CXL store rec_gen_media failed\n");
+
trace_cxl_general_media(cxlmd, type, cxlr, hpa,
hpa_alias, &evt->gen_media);
- else if (event_type == CXL_CPER_EVENT_DRAM)
+ } else if (event_type == CXL_CPER_EVENT_DRAM) {
+ if (cxl_store_rec_dram((struct cxl_memdev *)cxlmd, evt))
+ dev_dbg(&cxlmd->dev, "CXL store rec_dram failed\n");
+
trace_cxl_dram(cxlmd, type, cxlr, hpa, hpa_alias,
&evt->dram);
+ }
}
}
EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, "CXL");
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index a16a5886d40a..f88a13adf7fa 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -27,6 +27,7 @@ static void cxl_memdev_release(struct device *dev)
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
ida_free(&cxl_memdev_ida, cxlmd->id);
+ devm_cxl_memdev_edac_release(cxlmd);
kfree(cxlmd);
}
@@ -153,8 +154,8 @@ static ssize_t security_state_show(struct device *dev,
return sysfs_emit(buf, "frozen\n");
if (state & CXL_PMEM_SEC_STATE_LOCKED)
return sysfs_emit(buf, "locked\n");
- else
- return sysfs_emit(buf, "unlocked\n");
+
+ return sysfs_emit(buf, "unlocked\n");
}
static struct device_attribute dev_attr_security_state =
__ATTR(state, 0444, security_state_show, NULL);
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index 3b80e9a76ba8..b50551601c2e 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -415,17 +415,20 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
*/
if (global_ctrl & CXL_HDM_DECODER_ENABLE || (!hdm && info->mem_enabled))
return devm_cxl_enable_mem(&port->dev, cxlds);
- else if (!hdm)
- return -ENODEV;
- root = to_cxl_port(port->dev.parent);
- while (!is_cxl_root(root) && is_cxl_port(root->dev.parent))
- root = to_cxl_port(root->dev.parent);
- if (!is_cxl_root(root)) {
- dev_err(dev, "Failed to acquire root port for HDM enable\n");
+ /*
+	 * If the HDM Decoder Capability does not exist and DVSEC was
+	 * not set up, the DVSEC-based emulation cannot be used.
+ */
+ if (!hdm)
return -ENODEV;
- }
+ /* The HDM Decoder Capability exists but is globally disabled. */
+
+ /*
+ * If the DVSEC CXL Range registers are not enabled, just
+ * enable and use the HDM Decoder Capability registers.
+ */
if (!info->mem_enabled) {
rc = devm_cxl_enable_hdm(&port->dev, cxlhdm);
if (rc)
@@ -434,6 +437,26 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
return devm_cxl_enable_mem(&port->dev, cxlds);
}
+ /*
+ * Per CXL 2.0 Section 8.1.3.8.3 and 8.1.3.8.4 DVSEC CXL Range 1 Base
+ * [High,Low] when HDM operation is enabled the range register values
+ * are ignored by the device, but the spec also recommends matching the
+ * DVSEC Range 1,2 to HDM Decoder Range 0,1. So, non-zero info->ranges
+ * are expected even though Linux does not require or maintain that
+ * match. Check if at least one DVSEC range is enabled and allowed by
+ * the platform. That is, the DVSEC range must be covered by a locked
+ * platform window (CFMWS). Fail otherwise as the endpoint's decoders
+ * cannot be used.
+ */
+
+ root = to_cxl_port(port->dev.parent);
+ while (!is_cxl_root(root) && is_cxl_port(root->dev.parent))
+ root = to_cxl_port(root->dev.parent);
+ if (!is_cxl_root(root)) {
+ dev_err(dev, "Failed to acquire root port for HDM enable\n");
+ return -ENODEV;
+ }
+
for (i = 0, allowed = 0; i < info->ranges; i++) {
struct device *cxld_dev;
@@ -453,15 +476,6 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
return -ENXIO;
}
- /*
- * Per CXL 2.0 Section 8.1.3.8.3 and 8.1.3.8.4 DVSEC CXL Range 1 Base
- * [High,Low] when HDM operation is enabled the range register values
- * are ignored by the device, but the spec also recommends matching the
- * DVSEC Range 1,2 to HDM Decoder Range 0,1. So, non-zero info->ranges
- * are expected even though Linux does not require or maintain that
- * match. If at least one DVSEC range is enabled and allowed, skip HDM
- * Decoder Capability Enable.
- */
return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, "CXL");
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index 726bd4a7de27..eb46c6764d20 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -602,17 +602,19 @@ struct cxl_port *to_cxl_port(const struct device *dev)
}
EXPORT_SYMBOL_NS_GPL(to_cxl_port, "CXL");
+struct cxl_port *parent_port_of(struct cxl_port *port)
+{
+ if (!port || !port->parent_dport)
+ return NULL;
+ return port->parent_dport->port;
+}
+
static void unregister_port(void *_port)
{
struct cxl_port *port = _port;
- struct cxl_port *parent;
+ struct cxl_port *parent = parent_port_of(port);
struct device *lock_dev;
- if (is_cxl_root(port))
- parent = NULL;
- else
- parent = to_cxl_port(port->dev.parent);
-
/*
* CXL root port's and the first level of ports are unregistered
* under the platform firmware device lock, all other ports are
@@ -1035,15 +1037,6 @@ struct cxl_root *find_cxl_root(struct cxl_port *port)
}
EXPORT_SYMBOL_NS_GPL(find_cxl_root, "CXL");
-void put_cxl_root(struct cxl_root *cxl_root)
-{
- if (!cxl_root)
- return;
-
- put_device(&cxl_root->port.dev);
-}
-EXPORT_SYMBOL_NS_GPL(put_cxl_root, "CXL");
-
static struct cxl_dport *find_dport(struct cxl_port *port, int id)
{
struct cxl_dport *dport;
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index c3f4dc244df7..6e5e1460068d 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -231,11 +231,10 @@ static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
&cxlr->dev,
"Bypassing cpu_cache_invalidate_memregion() for testing!\n");
return 0;
- } else {
- dev_WARN(&cxlr->dev,
- "Failed to synchronize CPU cache state\n");
- return -ENXIO;
}
+ dev_WARN(&cxlr->dev,
+ "Failed to synchronize CPU cache state\n");
+ return -ENXIO;
}
cpu_cache_invalidate_memregion(IORES_DESC_CXL);
@@ -865,10 +864,23 @@ static int match_auto_decoder(struct device *dev, const void *data)
return 0;
}
+/**
+ * cxl_port_pick_region_decoder() - assign or lookup a decoder for a region
+ * @port: a port in the ancestry of the endpoint implied by @cxled
+ * @cxled: endpoint decoder to be, or currently, mapped by @port
+ * @cxlr: region to establish, or validate, decode at @port
+ *
+ * In the region creation path cxl_port_pick_region_decoder() is an
+ * allocator to find a free decoder at @port. In the region assembly
+ * path, it recalls the decoder that platform firmware picked, for
+ * validation purposes.
+ *
+ * The result is recorded in a 'struct cxl_region_ref' in @port.
+ */
static struct cxl_decoder *
-cxl_region_find_decoder(struct cxl_port *port,
- struct cxl_endpoint_decoder *cxled,
- struct cxl_region *cxlr)
+cxl_port_pick_region_decoder(struct cxl_port *port,
+ struct cxl_endpoint_decoder *cxled,
+ struct cxl_region *cxlr)
{
struct device *dev;
@@ -916,7 +928,8 @@ static bool auto_order_ok(struct cxl_port *port, struct cxl_region *cxlr_iter,
static struct cxl_region_ref *
alloc_region_ref(struct cxl_port *port, struct cxl_region *cxlr,
- struct cxl_endpoint_decoder *cxled)
+ struct cxl_endpoint_decoder *cxled,
+ struct cxl_decoder *cxld)
{
struct cxl_region_params *p = &cxlr->params;
struct cxl_region_ref *cxl_rr, *iter;
@@ -930,9 +943,6 @@ alloc_region_ref(struct cxl_port *port, struct cxl_region *cxlr,
continue;
if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
- struct cxl_decoder *cxld;
-
- cxld = cxl_region_find_decoder(port, cxled, cxlr);
if (auto_order_ok(port, iter->region, cxld))
continue;
}
@@ -1014,19 +1024,11 @@ static int cxl_rr_ep_add(struct cxl_region_ref *cxl_rr,
return 0;
}
-static int cxl_rr_alloc_decoder(struct cxl_port *port, struct cxl_region *cxlr,
- struct cxl_endpoint_decoder *cxled,
- struct cxl_region_ref *cxl_rr)
+static int cxl_rr_assign_decoder(struct cxl_port *port, struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled,
+ struct cxl_region_ref *cxl_rr,
+ struct cxl_decoder *cxld)
{
- struct cxl_decoder *cxld;
-
- cxld = cxl_region_find_decoder(port, cxled, cxlr);
- if (!cxld) {
- dev_dbg(&cxlr->dev, "%s: no decoder available\n",
- dev_name(&port->dev));
- return -EBUSY;
- }
-
if (cxld->region) {
dev_dbg(&cxlr->dev, "%s: %s already attached to %s\n",
dev_name(&port->dev), dev_name(&cxld->dev),
@@ -1117,7 +1119,16 @@ static int cxl_port_attach_region(struct cxl_port *port,
nr_targets_inc = true;
}
} else {
- cxl_rr = alloc_region_ref(port, cxlr, cxled);
+ struct cxl_decoder *cxld;
+
+ cxld = cxl_port_pick_region_decoder(port, cxled, cxlr);
+ if (!cxld) {
+ dev_dbg(&cxlr->dev, "%s: no decoder available\n",
+ dev_name(&port->dev));
+ return -EBUSY;
+ }
+
+ cxl_rr = alloc_region_ref(port, cxlr, cxled, cxld);
if (IS_ERR(cxl_rr)) {
dev_dbg(&cxlr->dev,
"%s: failed to allocate region reference\n",
@@ -1126,7 +1137,7 @@ static int cxl_port_attach_region(struct cxl_port *port,
}
nr_targets_inc = true;
- rc = cxl_rr_alloc_decoder(port, cxlr, cxled, cxl_rr);
+ rc = cxl_rr_assign_decoder(port, cxlr, cxled, cxl_rr, cxld);
if (rc)
goto out_erase;
}
@@ -1446,7 +1457,7 @@ static int cxl_port_setup_targets(struct cxl_port *port,
if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
if (cxld->interleave_ways != iw ||
- cxld->interleave_granularity != ig ||
+ (iw > 1 && cxld->interleave_granularity != ig) ||
!region_res_match_cxl_range(p, &cxld->hpa_range) ||
((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
dev_err(&cxlr->dev,
@@ -1748,13 +1759,6 @@ static int cmp_interleave_pos(const void *a, const void *b)
return cxled_a->pos - cxled_b->pos;
}
-static struct cxl_port *next_port(struct cxl_port *port)
-{
- if (!port->parent_dport)
- return NULL;
- return port->parent_dport->port;
-}
-
static int match_switch_decoder_by_range(struct device *dev,
const void *data)
{
@@ -1781,7 +1785,7 @@ static int find_pos_and_ways(struct cxl_port *port, struct range *range,
struct device *dev;
int rc = -ENXIO;
- parent = next_port(port);
+ parent = parent_port_of(port);
if (!parent)
return rc;
@@ -1805,6 +1809,13 @@ static int find_pos_and_ways(struct cxl_port *port, struct range *range,
}
put_device(dev);
+ if (rc)
+ dev_err(port->uport_dev,
+ "failed to find %s:%s in target list of %s\n",
+ dev_name(&port->dev),
+ dev_name(port->parent_dport->dport_dev),
+ dev_name(&cxlsd->cxld.dev));
+
return rc;
}
@@ -1861,7 +1872,7 @@ static int cxl_calc_interleave_pos(struct cxl_endpoint_decoder *cxled)
*/
/* Iterate from endpoint to root_port refining the position */
- for (iter = port; iter; iter = next_port(iter)) {
+ for (iter = port; iter; iter = parent_port_of(iter)) {
if (is_cxl_root(iter))
break;
@@ -1940,7 +1951,9 @@ static int cxl_region_attach(struct cxl_region *cxlr,
if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) {
dev_dbg(&cxlr->dev, "region already active\n");
return -EBUSY;
- } else if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) {
+ }
+
+ if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) {
dev_dbg(&cxlr->dev, "interleave config missing\n");
return -ENXIO;
}
@@ -2160,6 +2173,12 @@ static int attach_target(struct cxl_region *cxlr,
rc = cxl_region_attach(cxlr, cxled, pos);
up_read(&cxl_dpa_rwsem);
up_write(&cxl_region_rwsem);
+
+ if (rc)
+ dev_warn(cxled->cxld.dev.parent,
+ "failed to attach %s to %s: %d\n",
+ dev_name(&cxled->cxld.dev), dev_name(&cxlr->dev), rc);
+
return rc;
}
@@ -3196,20 +3215,49 @@ err:
return rc;
}
-static int match_root_decoder_by_range(struct device *dev,
- const void *data)
+static int match_decoder_by_range(struct device *dev, const void *data)
{
const struct range *r1, *r2 = data;
- struct cxl_root_decoder *cxlrd;
+ struct cxl_decoder *cxld;
- if (!is_root_decoder(dev))
+ if (!is_switch_decoder(dev))
return 0;
- cxlrd = to_cxl_root_decoder(dev);
- r1 = &cxlrd->cxlsd.cxld.hpa_range;
+ cxld = to_cxl_decoder(dev);
+ r1 = &cxld->hpa_range;
return range_contains(r1, r2);
}
+static struct cxl_decoder *
+cxl_port_find_switch_decoder(struct cxl_port *port, struct range *hpa)
+{
+ struct device *cxld_dev = device_find_child(&port->dev, hpa,
+ match_decoder_by_range);
+
+ return cxld_dev ? to_cxl_decoder(cxld_dev) : NULL;
+}
+
+static struct cxl_root_decoder *
+cxl_find_root_decoder(struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_port *port = cxled_to_port(cxled);
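+	/* Can't fail in practice: root exit unregisters all descendant ports first. */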
+ struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);
+ struct cxl_decoder *root, *cxld = &cxled->cxld;
+ struct range *hpa = &cxld->hpa_range;
+
+ root = cxl_port_find_switch_decoder(&cxl_root->port, hpa);
+ if (!root) {
+ dev_err(cxlmd->dev.parent,
+ "%s:%s no CXL window for range %#llx:%#llx\n",
+ dev_name(&cxlmd->dev), dev_name(&cxld->dev),
+ cxld->hpa_range.start, cxld->hpa_range.end);
+ return NULL;
+ }
+
+ return to_cxl_root_decoder(&root->dev);
+}
+
static int match_region_by_range(struct device *dev, const void *data)
{
struct cxl_region_params *p;
@@ -3376,47 +3424,45 @@ static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
return cxlr;
}
-int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled)
+static struct cxl_region *
+cxl_find_region_by_range(struct cxl_root_decoder *cxlrd, struct range *hpa)
+{
+ struct device *region_dev;
+
+ region_dev = device_find_child(&cxlrd->cxlsd.cxld.dev, hpa,
+ match_region_by_range);
+ if (!region_dev)
+ return NULL;
+
+ return to_cxl_region(region_dev);
+}
+
+int cxl_add_to_region(struct cxl_endpoint_decoder *cxled)
{
- struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
struct range *hpa = &cxled->cxld.hpa_range;
- struct cxl_decoder *cxld = &cxled->cxld;
- struct device *cxlrd_dev, *region_dev;
- struct cxl_root_decoder *cxlrd;
struct cxl_region_params *p;
- struct cxl_region *cxlr;
bool attach = false;
int rc;
- cxlrd_dev = device_find_child(&root->dev, &cxld->hpa_range,
- match_root_decoder_by_range);
- if (!cxlrd_dev) {
- dev_err(cxlmd->dev.parent,
- "%s:%s no CXL window for range %#llx:%#llx\n",
- dev_name(&cxlmd->dev), dev_name(&cxld->dev),
- cxld->hpa_range.start, cxld->hpa_range.end);
+ struct cxl_root_decoder *cxlrd __free(put_cxl_root_decoder) =
+ cxl_find_root_decoder(cxled);
+ if (!cxlrd)
return -ENXIO;
- }
-
- cxlrd = to_cxl_root_decoder(cxlrd_dev);
/*
* Ensure that if multiple threads race to construct_region() for @hpa
* one does the construction and the others add to that.
*/
mutex_lock(&cxlrd->range_lock);
- region_dev = device_find_child(&cxlrd->cxlsd.cxld.dev, hpa,
- match_region_by_range);
- if (!region_dev) {
+ struct cxl_region *cxlr __free(put_cxl_region) =
+ cxl_find_region_by_range(cxlrd, hpa);
+ if (!cxlr)
cxlr = construct_region(cxlrd, cxled);
- region_dev = &cxlr->dev;
- } else
- cxlr = to_cxl_region(region_dev);
mutex_unlock(&cxlrd->range_lock);
rc = PTR_ERR_OR_ZERO(cxlr);
if (rc)
- goto out;
+ return rc;
attach_target(cxlr, cxled, -1, TASK_UNINTERRUPTIBLE);
@@ -3436,9 +3482,6 @@ int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled)
p->res);
}
- put_device(region_dev);
-out:
- put_device(cxlrd_dev);
return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_add_to_region, "CXL");
@@ -3537,8 +3580,18 @@ out:
switch (cxlr->mode) {
case CXL_PARTMODE_PMEM:
+ rc = devm_cxl_region_edac_register(cxlr);
+ if (rc)
+ dev_dbg(&cxlr->dev, "CXL EDAC registration for region_id=%d failed\n",
+ cxlr->id);
+
return devm_cxl_add_pmem_region(cxlr);
case CXL_PARTMODE_RAM:
+ rc = devm_cxl_region_edac_register(cxlr);
+ if (rc)
+ dev_dbg(&cxlr->dev, "CXL EDAC registration for region_id=%d failed\n",
+ cxlr->id);
+
/*
 * The region cannot be managed by CXL if any portion of
* it is already online as 'System RAM'
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index a9ab46eb0610..3f1695c96abc 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -724,6 +724,7 @@ static inline bool is_cxl_root(struct cxl_port *port)
int cxl_num_decoders_committed(struct cxl_port *port);
bool is_cxl_port(const struct device *dev);
struct cxl_port *to_cxl_port(const struct device *dev);
+struct cxl_port *parent_port_of(struct cxl_port *port);
void cxl_port_commit_reap(struct cxl_decoder *cxld);
struct pci_bus;
int devm_cxl_register_pci_bus(struct device *host, struct device *uport_dev,
@@ -736,10 +737,12 @@ struct cxl_port *devm_cxl_add_port(struct device *host,
struct cxl_root *devm_cxl_add_root(struct device *host,
const struct cxl_root_ops *ops);
struct cxl_root *find_cxl_root(struct cxl_port *port);
-void put_cxl_root(struct cxl_root *cxl_root);
-DEFINE_FREE(put_cxl_root, struct cxl_root *, if (_T) put_cxl_root(_T))
+DEFINE_FREE(put_cxl_root, struct cxl_root *, if (_T) put_device(&_T->port.dev))
DEFINE_FREE(put_cxl_port, struct cxl_port *, if (!IS_ERR_OR_NULL(_T)) put_device(&_T->dev))
+DEFINE_FREE(put_cxl_root_decoder, struct cxl_root_decoder *, if (!IS_ERR_OR_NULL(_T)) put_device(&_T->cxlsd.cxld.dev))
+DEFINE_FREE(put_cxl_region, struct cxl_region *, if (!IS_ERR_OR_NULL(_T)) put_device(&_T->dev))
+
int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd);
void cxl_bus_rescan(void);
void cxl_bus_drain(void);
@@ -856,8 +859,7 @@ struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_port *port);
#ifdef CONFIG_CXL_REGION
bool is_cxl_pmem_region(struct device *dev);
struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev);
-int cxl_add_to_region(struct cxl_port *root,
- struct cxl_endpoint_decoder *cxled);
+int cxl_add_to_region(struct cxl_endpoint_decoder *cxled);
struct cxl_dax_region *to_cxl_dax_region(struct device *dev);
u64 cxl_port_get_spa_cache_alias(struct cxl_port *endpoint, u64 spa);
#else
@@ -869,8 +871,7 @@ static inline struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
{
return NULL;
}
-static inline int cxl_add_to_region(struct cxl_port *root,
- struct cxl_endpoint_decoder *cxled)
+static inline int cxl_add_to_region(struct cxl_endpoint_decoder *cxled)
{
return 0;
}
@@ -912,4 +913,14 @@ bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port);
u16 cxl_gpf_get_dvsec(struct device *dev);
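+/*
+ * Interruptible read-lock acquire for scope-based release: returns the
+ * semaphore on success, or NULL if interrupted by a signal.
+ */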
+static inline struct rw_semaphore *rwsem_read_intr_acquire(struct rw_semaphore *rwsem)
+{
+ if (down_read_interruptible(rwsem))
+ return NULL;
+
+ return rwsem;
+}
+
+DEFINE_FREE(rwsem_read_release, struct rw_semaphore *, if (_T) up_read(_T))
+
#endif /* __CXL_H__ */
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 3ec6b906371b..551b0ba2caa1 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -45,6 +45,11 @@
* @endpoint: connection to the CXL port topology for this memory device
* @id: id number of this memdev instance.
* @depth: endpoint port depth
+ * @scrub_cycle: current scrub cycle set for this device
+ * @scrub_region_id: id of the backed region (if any) for which the
+ *                   current scrub cycle is set
+ * @err_rec_array: xarrays of memdev error records, used to verify that
+ *                 the attributes of a memory repair operation are from
+ *                 the current boot
*/
struct cxl_memdev {
struct device dev;
@@ -56,6 +61,9 @@ struct cxl_memdev {
struct cxl_port *endpoint;
int id;
int depth;
+ u8 scrub_cycle;
+ int scrub_region_id;
+ void *err_rec_array;
};
static inline struct cxl_memdev *to_cxl_memdev(struct device *dev)
@@ -527,6 +535,7 @@ enum cxl_opcode {
CXL_MBOX_OP_GET_SUPPORTED_FEATURES = 0x0500,
CXL_MBOX_OP_GET_FEATURE = 0x0501,
CXL_MBOX_OP_SET_FEATURE = 0x0502,
+ CXL_MBOX_OP_DO_MAINTENANCE = 0x0600,
CXL_MBOX_OP_IDENTIFY = 0x4000,
CXL_MBOX_OP_GET_PARTITION_INFO = 0x4100,
CXL_MBOX_OP_SET_PARTITION_INFO = 0x4101,
@@ -853,6 +862,27 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd);
int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa);
int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa);
+#ifdef CONFIG_CXL_EDAC_MEM_FEATURES
+int devm_cxl_memdev_edac_register(struct cxl_memdev *cxlmd);
+int devm_cxl_region_edac_register(struct cxl_region *cxlr);
+int cxl_store_rec_gen_media(struct cxl_memdev *cxlmd, union cxl_event *evt);
+int cxl_store_rec_dram(struct cxl_memdev *cxlmd, union cxl_event *evt);
+void devm_cxl_memdev_edac_release(struct cxl_memdev *cxlmd);
+#else
+static inline int devm_cxl_memdev_edac_register(struct cxl_memdev *cxlmd)
+{ return 0; }
+static inline int devm_cxl_region_edac_register(struct cxl_region *cxlr)
+{ return 0; }
+static inline int cxl_store_rec_gen_media(struct cxl_memdev *cxlmd,
+ union cxl_event *evt)
+{ return 0; }
+static inline int cxl_store_rec_dram(struct cxl_memdev *cxlmd,
+ union cxl_event *evt)
+{ return 0; }
+static inline void devm_cxl_memdev_edac_release(struct cxl_memdev *cxlmd)
+{ return; }
+#endif
+
#ifdef CONFIG_CXL_SUSPEND
void cxl_mem_active_inc(void);
void cxl_mem_active_dec(void);
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index 9675243bd05b..6e6777b7bafb 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -180,6 +180,10 @@ static int cxl_mem_probe(struct device *dev)
return rc;
}
+ rc = devm_cxl_memdev_edac_register(cxlmd);
+ if (rc)
+ dev_dbg(dev, "CXL memdev EDAC registration failed rc=%d\n", rc);
+
/*
* The kernel may be operating out of CXL memory on this device,
* there is no spec defined way to determine whether this device
diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c
index a35fc5552845..fe4b593331da 100644
--- a/drivers/cxl/port.c
+++ b/drivers/cxl/port.c
@@ -30,7 +30,7 @@ static void schedule_detach(void *cxlmd)
schedule_cxl_memdev_detach(cxlmd);
}
-static int discover_region(struct device *dev, void *root)
+static int discover_region(struct device *dev, void *unused)
{
struct cxl_endpoint_decoder *cxled;
int rc;
@@ -49,7 +49,7 @@ static int discover_region(struct device *dev, void *root)
* Region enumeration is opportunistic, if this add-event fails,
* continue to the next endpoint decoder.
*/
- rc = cxl_add_to_region(root, cxled);
+ rc = cxl_add_to_region(cxled);
if (rc)
dev_dbg(dev, "failed to add to region: %#llx-%#llx\n",
cxled->cxld.hpa_range.start, cxled->cxld.hpa_range.end);
@@ -95,7 +95,6 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_hdm *cxlhdm;
- struct cxl_port *root;
int rc;
rc = cxl_dvsec_rr_decode(cxlds, &info);
@@ -127,18 +126,10 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
return rc;
/*
- * This can't fail in practice as CXL root exit unregisters all
- * descendant ports and that in turn synchronizes with cxl_port_probe()
- */
- struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);
-
- root = &cxl_root->port;
-
- /*
* Now that all endpoint decoders are successfully enumerated, try to
* assemble regions from committed decoders
*/
- device_for_each_child(&port->dev, root, discover_region);
+ device_for_each_child(&port->dev, NULL, discover_region);
return 0;
}
diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c
index e97d47f42ee2..584c70a34b52 100644
--- a/drivers/dax/kmem.c
+++ b/drivers/dax/kmem.c
@@ -13,6 +13,7 @@
#include <linux/mman.h>
#include <linux/memory-tiers.h>
#include <linux/memory_hotplug.h>
+#include <linux/string_helpers.h>
#include "dax-private.h"
#include "bus.h"
@@ -68,7 +69,7 @@ static void kmem_put_memory_types(void)
static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
{
struct device *dev = &dev_dax->dev;
- unsigned long total_len = 0;
+ unsigned long total_len = 0, orig_len = 0;
struct dax_kmem_data *data;
struct memory_dev_type *mtype;
int i, rc, mapped = 0;
@@ -97,6 +98,7 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
for (i = 0; i < dev_dax->nr_range; i++) {
struct range range;
+ orig_len += range_len(&dev_dax->ranges[i].range);
rc = dax_kmem_range(dev_dax, i, &range);
if (rc) {
dev_info(dev, "mapping%d: %#llx-%#llx too small after alignment\n",
@@ -109,6 +111,12 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
if (!total_len) {
dev_warn(dev, "rejecting DAX region without any memory after alignment\n");
return -EINVAL;
+ } else if (total_len != orig_len) {
+ char buf[16];
+
+ string_get_size(orig_len - total_len, 1, STRING_UNITS_2,
+ buf, sizeof(buf));
+ dev_warn(dev, "DAX region truncated by %s due to alignment\n", buf);
}
init_node_memory_type(numa_node, mtype);
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 760b7d81fcd8..80355d03004d 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -702,27 +702,6 @@ static void idxd_disable_system_pasid(struct idxd_device *idxd)
idxd->pasid = IOMMU_PASID_INVALID;
}
-static int idxd_enable_sva(struct pci_dev *pdev)
-{
- int ret;
-
- ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
- if (ret)
- return ret;
-
- ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
- if (ret)
- iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
-
- return ret;
-}
-
-static void idxd_disable_sva(struct pci_dev *pdev)
-{
- iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
- iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
-}
-
static int idxd_probe(struct idxd_device *idxd)
{
struct pci_dev *pdev = idxd->pdev;
@@ -737,17 +716,13 @@ static int idxd_probe(struct idxd_device *idxd)
dev_dbg(dev, "IDXD reset complete\n");
if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
- if (idxd_enable_sva(pdev)) {
- dev_warn(dev, "Unable to turn on user SVA feature.\n");
- } else {
- set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
+ set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
- rc = idxd_enable_system_pasid(idxd);
- if (rc)
- dev_warn(dev, "No in-kernel DMA with PASID. %d\n", rc);
- else
- set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
- }
+ rc = idxd_enable_system_pasid(idxd);
+ if (rc)
+ dev_warn(dev, "No in-kernel DMA with PASID. %d\n", rc);
+ else
+ set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
} else if (!sva) {
dev_warn(dev, "User forced SVA off via module param.\n");
}
@@ -785,8 +760,6 @@ static int idxd_probe(struct idxd_device *idxd)
err:
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
- if (device_user_pasid_enabled(idxd))
- idxd_disable_sva(pdev);
return rc;
}
@@ -797,8 +770,6 @@ static void idxd_cleanup(struct idxd_device *idxd)
idxd_cleanup_internals(idxd);
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
- if (device_user_pasid_enabled(idxd))
- idxd_disable_sva(idxd->pdev);
}
/*
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 20333608b983..cae52c654a15 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -1746,9 +1746,9 @@ altr_edac_a10_device_trig(struct file *file, const char __user *user_buf,
local_irq_save(flags);
if (trig_type == ALTR_UE_TRIGGER_CHAR)
- writel(priv->ue_set_mask, set_addr);
+ writew(priv->ue_set_mask, set_addr);
else
- writel(priv->ce_set_mask, set_addr);
+ writew(priv->ce_set_mask, set_addr);
/* Ensure the interrupt test bits are set */
wmb();
@@ -1778,7 +1778,7 @@ altr_edac_a10_device_trig2(struct file *file, const char __user *user_buf,
local_irq_save(flags);
if (trig_type == ALTR_UE_TRIGGER_CHAR) {
- writel(priv->ue_set_mask, set_addr);
+ writew(priv->ue_set_mask, set_addr);
} else {
/* Setup read/write of 4 bytes */
writel(ECC_WORD_WRITE, drvdata->base + ECC_BLK_DBYTECTRL_OFST);
diff --git a/drivers/edac/mem_repair.c b/drivers/edac/mem_repair.c
index 3b1a845457b0..d1a8caa85369 100755
--- a/drivers/edac/mem_repair.c
+++ b/drivers/edac/mem_repair.c
@@ -45,6 +45,15 @@ struct edac_mem_repair_context {
struct attribute_group group;
};
+const char * const edac_repair_type[] = {
+ [EDAC_REPAIR_PPR] = "ppr",
+ [EDAC_REPAIR_CACHELINE_SPARING] = "cacheline-sparing",
+ [EDAC_REPAIR_ROW_SPARING] = "row-sparing",
+ [EDAC_REPAIR_BANK_SPARING] = "bank-sparing",
+ [EDAC_REPAIR_RANK_SPARING] = "rank-sparing",
+};
+EXPORT_SYMBOL_GPL(edac_repair_type);
+
#define TO_MR_DEV_ATTR(_dev_attr) \
container_of(_dev_attr, struct edac_mem_repair_dev_attr, dev_attr)
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 7df19d82aa68..bbd2155d8483 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -267,6 +267,23 @@ config TURRIS_MOX_RWTM
other manufacturing data and also utilize the Entropy Bit Generator
for hardware random number generation.
+if TURRIS_MOX_RWTM
+
+config TURRIS_MOX_RWTM_KEYCTL
+ bool "Turris Mox rWTM ECDSA message signing"
+ default y
+ depends on KEYS
+ depends on ASYMMETRIC_KEY_TYPE
+ select CZNIC_PLATFORMS
+ select TURRIS_SIGNING_KEY
+ help
+	  Say Y here to add support for ECDSA message signing with the board
+	  private key (each Turris Mox has an ECDSA private key generated in
+	  the secure coprocessor when manufactured). This functionality is
+	  exposed via the keyctl() syscall.
+
+endif # TURRIS_MOX_RWTM
+
source "drivers/firmware/arm_ffa/Kconfig"
source "drivers/firmware/broadcom/Kconfig"
source "drivers/firmware/cirrus/Kconfig"
diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig
index dabd874641d0..e3fb36825978 100644
--- a/drivers/firmware/arm_scmi/Kconfig
+++ b/drivers/firmware/arm_scmi/Kconfig
@@ -69,6 +69,19 @@ config ARM_SCMI_DEBUG_COUNTERS
such useful debug counters. This can be helpful for debugging and
SCMI monitoring.
+config ARM_SCMI_QUIRKS
+ bool "Enable SCMI Quirks framework"
+ depends on JUMP_LABEL || COMPILE_TEST
+ default y
+ help
+	  Enables support for the SCMI Quirks framework, used to work around
+	  SCMI platform firmware bugs on systems already deployed in the wild.
+
+ The framework allows the definition of platform-specific code quirks
+ that will be associated and enabled only on the desired platforms
+ depending on the SCMI firmware advertised versions and/or machine
+ compatibles.
+
source "drivers/firmware/arm_scmi/transports/Kconfig"
source "drivers/firmware/arm_scmi/vendors/imx/Kconfig"
diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile
index 9ac81adff567..780cd62b2f78 100644
--- a/drivers/firmware/arm_scmi/Makefile
+++ b/drivers/firmware/arm_scmi/Makefile
@@ -3,6 +3,7 @@ scmi-bus-y = bus.o
scmi-core-objs := $(scmi-bus-y)
scmi-driver-y = driver.o notify.o
+scmi-driver-$(CONFIG_ARM_SCMI_QUIRKS) += quirks.o
scmi-driver-$(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT) += raw_mode.o
scmi-transport-$(CONFIG_ARM_SCMI_HAVE_SHMEM) = shmem.o
scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index 3a5474015f7d..1adef0389475 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -201,55 +201,51 @@ scmi_protocol_table_unregister(const struct scmi_device_id *id_table)
scmi_protocol_device_unrequest(entry);
}
-static const struct scmi_device_id *
-scmi_dev_match_id(struct scmi_device *scmi_dev, const struct scmi_driver *scmi_drv)
+static int scmi_dev_match_by_id_table(struct scmi_device *scmi_dev,
+ const struct scmi_device_id *id_table)
{
- const struct scmi_device_id *id = scmi_drv->id_table;
-
- if (!id)
- return NULL;
-
- for (; id->protocol_id; id++)
- if (id->protocol_id == scmi_dev->protocol_id) {
- if (!id->name)
- return id;
- else if (!strcmp(id->name, scmi_dev->name))
- return id;
- }
+ if (!id_table || !id_table->name)
+ return 0;
+
+ /* Always skip transport devices from matching */
+ for (; id_table->protocol_id && id_table->name; id_table++)
+ if (id_table->protocol_id == scmi_dev->protocol_id &&
+ strncmp(scmi_dev->name, "__scmi_transport_device", 23) &&
+ !strcmp(id_table->name, scmi_dev->name))
+ return 1;
+ return 0;
+}
- return NULL;
+static int scmi_dev_match_id(struct scmi_device *scmi_dev,
+ const struct scmi_driver *scmi_drv)
+{
+ return scmi_dev_match_by_id_table(scmi_dev, scmi_drv->id_table);
}
static int scmi_dev_match(struct device *dev, const struct device_driver *drv)
{
const struct scmi_driver *scmi_drv = to_scmi_driver(drv);
struct scmi_device *scmi_dev = to_scmi_dev(dev);
- const struct scmi_device_id *id;
-
- id = scmi_dev_match_id(scmi_dev, scmi_drv);
- if (id)
- return 1;
- return 0;
+ return scmi_dev_match_id(scmi_dev, scmi_drv);
}
static int scmi_match_by_id_table(struct device *dev, const void *data)
{
- struct scmi_device *sdev = to_scmi_dev(dev);
+ struct scmi_device *scmi_dev = to_scmi_dev(dev);
const struct scmi_device_id *id_table = data;
- return sdev->protocol_id == id_table->protocol_id &&
- (id_table->name && !strcmp(sdev->name, id_table->name));
+ return scmi_dev_match_by_id_table(scmi_dev, id_table);
}
static struct scmi_device *scmi_child_dev_find(struct device *parent,
int prot_id, const char *name)
{
- struct scmi_device_id id_table;
+ struct scmi_device_id id_table[2] = { 0 };
struct device *dev;
- id_table.protocol_id = prot_id;
- id_table.name = name;
+ id_table[0].protocol_id = prot_id;
+ id_table[0].name = name;
dev = device_find_child(parent, &id_table, scmi_match_by_id_table);
if (!dev)
@@ -463,6 +459,20 @@ put_dev:
return NULL;
}
+static struct scmi_device *
+_scmi_device_create(struct device_node *np, struct device *parent,
+ int protocol, const char *name)
+{
+ struct scmi_device *sdev;
+
+ sdev = __scmi_device_create(np, parent, protocol, name);
+ if (!sdev)
+ pr_err("(%s) Failed to create device for protocol 0x%x (%s)\n",
+ of_node_full_name(parent->of_node), protocol, name);
+
+ return sdev;
+}
+
/**
* scmi_device_create - A method to create one or more SCMI devices
*
@@ -495,7 +505,7 @@ struct scmi_device *scmi_device_create(struct device_node *np,
struct scmi_device *scmi_dev = NULL;
if (name)
- return __scmi_device_create(np, parent, protocol, name);
+ return _scmi_device_create(np, parent, protocol, name);
mutex_lock(&scmi_requested_devices_mtx);
phead = idr_find(&scmi_requested_devices, protocol);
@@ -509,18 +519,13 @@ struct scmi_device *scmi_device_create(struct device_node *np,
list_for_each_entry(rdev, phead, node) {
struct scmi_device *sdev;
- sdev = __scmi_device_create(np, parent,
- rdev->id_table->protocol_id,
- rdev->id_table->name);
- /* Report errors and carry on... */
+ sdev = _scmi_device_create(np, parent,
+ rdev->id_table->protocol_id,
+ rdev->id_table->name);
if (sdev)
scmi_dev = sdev;
- else
- pr_err("(%s) Failed to create device for protocol 0x%x (%s)\n",
- of_node_full_name(parent->of_node),
- rdev->id_table->protocol_id,
- rdev->id_table->name);
}
+
mutex_unlock(&scmi_requested_devices_mtx);
return scmi_dev;
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index 2ed2279388f0..afa7981efe82 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -11,6 +11,7 @@
#include "protocols.h"
#include "notify.h"
+#include "quirks.h"
/* Updated only after ALL the mandatory features for that version are merged */
#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x30000
@@ -429,6 +430,23 @@ static void iter_clk_describe_prepare_message(void *message,
msg->rate_index = cpu_to_le32(desc_index);
}
+#define QUIRK_OUT_OF_SPEC_TRIPLET \
+ ({ \
+ /* \
+ * A known quirk: a triplet is returned but num_returned != 3 \
+ * Check for a safe payload size and fix. \
+ */ \
+ if (st->num_returned != 3 && st->num_remaining == 0 && \
+ st->rx_len == sizeof(*r) + sizeof(__le32) * 2 * 3) { \
+ st->num_returned = 3; \
+ st->num_remaining = 0; \
+ } else { \
+ dev_err(p->dev, \
+ "Cannot fix out-of-spec reply !\n"); \
+ return -EPROTO; \
+ } \
+ })
+
static int
iter_clk_describe_update_state(struct scmi_iterator_state *st,
const void *response, void *priv)
@@ -450,19 +468,8 @@ iter_clk_describe_update_state(struct scmi_iterator_state *st,
p->clk->name, st->num_returned, st->num_remaining,
st->rx_len);
- /*
- * A known quirk: a triplet is returned but num_returned != 3
- * Check for a safe payload size and fix.
- */
- if (st->num_returned != 3 && st->num_remaining == 0 &&
- st->rx_len == sizeof(*r) + sizeof(__le32) * 2 * 3) {
- st->num_returned = 3;
- st->num_remaining = 0;
- } else {
- dev_err(p->dev,
- "Cannot fix out-of-spec reply !\n");
- return -EPROTO;
- }
+ SCMI_QUIRK(clock_rates_triplet_out_of_spec,
+ QUIRK_OUT_OF_SPEC_TRIPLET);
}
return 0;
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index 10ea7962323e..dab758c5fdea 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -475,6 +475,7 @@ static int __tag##_probe(struct platform_device *pdev) \
if (ret) \
goto err; \
\
+ spdev->dev.parent = dev; \
ret = platform_device_add(spdev); \
if (ret) \
goto err; \
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 0390d5ff195e..395fe9289035 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -11,7 +11,7 @@
* various power domain DVFS including the core/cluster, certain system
* clocks configuration, thermal sensors and many others.
*
- * Copyright (C) 2018-2024 ARM Ltd.
+ * Copyright (C) 2018-2025 ARM Ltd.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -38,6 +38,7 @@
#include "common.h"
#include "notify.h"
+#include "quirks.h"
#include "raw_mode.h"
@@ -439,14 +440,8 @@ static void scmi_create_protocol_devices(struct device_node *np,
struct scmi_info *info,
int prot_id, const char *name)
{
- struct scmi_device *sdev;
-
mutex_lock(&info->devreq_mtx);
- sdev = scmi_device_create(np, info->dev, prot_id, name);
- if (name && !sdev)
- dev_err(info->dev,
- "failed to create device for protocol 0x%X (%s)\n",
- prot_id, name);
+ scmi_device_create(np, info->dev, prot_id, name);
mutex_unlock(&info->devreq_mtx);
}
@@ -1190,7 +1185,8 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
* RX path since it will be already queued at the end of the TX
* poll loop.
*/
- if (!xfer->hdr.poll_completion)
+ if (!xfer->hdr.poll_completion ||
+ xfer->hdr.type == MSG_TYPE_DELAYED_RESP)
scmi_raw_message_report(info->raw, xfer,
SCMI_RAW_REPLY_QUEUE,
cinfo->id);
@@ -1738,6 +1734,39 @@ static int scmi_common_get_max_msg_size(const struct scmi_protocol_handle *ph)
}
/**
+ * scmi_protocol_msg_check - Check protocol message attributes
+ *
+ * @ph: A reference to the protocol handle.
+ * @message_id: The ID of the message to check.
+ * @attributes: A parameter to optionally return the retrieved message
+ * attributes, in case of Success.
+ *
+ * A helper to check protocol message attributes for a specific protocol
+ * and message pair.
+ *
+ * Return: 0 on SUCCESS
+ */
+static int scmi_protocol_msg_check(const struct scmi_protocol_handle *ph,
+ u32 message_id, u32 *attributes)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ ret = xfer_get_init(ph, PROTOCOL_MESSAGE_ATTRIBUTES,
+ sizeof(__le32), 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(message_id, t->tx.buf);
+ ret = do_xfer(ph, t);
+ if (!ret && attributes)
+ *attributes = get_unaligned_le32(t->rx.buf);
+ xfer_put(ph, t);
+
+ return ret;
+}
+
+/**
* struct scmi_iterator - Iterator descriptor
* @msg: A reference to the message TX buffer; filled by @prepare_message with
* a proper custom command payload for each multi-part command request.
@@ -1869,6 +1898,13 @@ struct scmi_msg_resp_desc_fc {
__le32 db_preserve_hmask;
};
+#define QUIRK_PERF_FC_FORCE \
+ ({ \
+ if (pi->proto->id == SCMI_PROTOCOL_PERF && \
+ message_id == 0x8 /* PERF_LEVEL_GET */) \
+ attributes |= BIT(0); \
+ })
+
static void
scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
u8 describe_id, u32 message_id, u32 valid_size,
@@ -1878,6 +1914,7 @@ scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
int ret;
u32 flags;
u64 phys_addr;
+ u32 attributes;
u8 size;
void __iomem *addr;
struct scmi_xfer *t;
@@ -1886,6 +1923,16 @@ scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
struct scmi_msg_resp_desc_fc *resp;
const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+ /* Check if the MSG_ID supports fastchannel */
+ ret = scmi_protocol_msg_check(ph, message_id, &attributes);
+ SCMI_QUIRK(perf_level_get_fc_force, QUIRK_PERF_FC_FORCE);
+ if (ret || !MSG_SUPPORTS_FASTCHANNEL(attributes)) {
+ dev_dbg(ph->dev,
+ "Skip FC init for 0x%02X/%d domain:%d - ret:%d\n",
+ pi->proto->id, message_id, domain, ret);
+ return;
+ }
+
if (!p_addr) {
ret = -EINVAL;
goto err_out;
@@ -2003,39 +2050,6 @@ static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db)
SCMI_PROTO_FC_RING_DB(64);
}
-/**
- * scmi_protocol_msg_check - Check protocol message attributes
- *
- * @ph: A reference to the protocol handle.
- * @message_id: The ID of the message to check.
- * @attributes: A parameter to optionally return the retrieved message
- * attributes, in case of Success.
- *
- * An helper to check protocol message attributes for a specific protocol
- * and message pair.
- *
- * Return: 0 on SUCCESS
- */
-static int scmi_protocol_msg_check(const struct scmi_protocol_handle *ph,
- u32 message_id, u32 *attributes)
-{
- int ret;
- struct scmi_xfer *t;
-
- ret = xfer_get_init(ph, PROTOCOL_MESSAGE_ATTRIBUTES,
- sizeof(__le32), 0, &t);
- if (ret)
- return ret;
-
- put_unaligned_le32(message_id, t->tx.buf);
- ret = do_xfer(ph, t);
- if (!ret && attributes)
- *attributes = get_unaligned_le32(t->rx.buf);
- xfer_put(ph, t);
-
- return ret;
-}
-
static const struct scmi_proto_helpers_ops helpers_ops = {
.extended_name_get = scmi_common_extended_name_get,
.get_max_msg_size = scmi_common_get_max_msg_size,
@@ -2828,9 +2842,8 @@ static int scmi_bus_notifier(struct notifier_block *nb,
struct scmi_info *info = bus_nb_to_scmi_info(nb);
struct scmi_device *sdev = to_scmi_dev(data);
- /* Skip transport devices and devices of different SCMI instances */
- if (!strncmp(sdev->name, "__scmi_transport_device", 23) ||
- sdev->dev.parent != info->dev)
+ /* Skip devices of different SCMI instances */
+ if (sdev->dev.parent != info->dev)
return NOTIFY_DONE;
switch (action) {
@@ -3101,6 +3114,18 @@ static const struct scmi_desc *scmi_transport_setup(struct device *dev)
return &trans->desc;
}
+static void scmi_enable_matching_quirks(struct scmi_info *info)
+{
+ struct scmi_revision_info *rev = &info->version;
+
+ dev_dbg(info->dev, "Looking for quirks matching: %s/%s/0x%08X\n",
+ rev->vendor_id, rev->sub_vendor_id, rev->impl_ver);
+
+ /* Enable applicable quirks */
+ scmi_quirks_enable(info->dev, rev->vendor_id,
+ rev->sub_vendor_id, rev->impl_ver);
+}
+
static int scmi_probe(struct platform_device *pdev)
{
int ret;
@@ -3222,6 +3247,8 @@ static int scmi_probe(struct platform_device *pdev)
list_add_tail(&info->node, &scmi_list);
mutex_unlock(&scmi_list_mutex);
+ scmi_enable_matching_quirks(info);
+
for_each_available_child_of_node(np, child) {
u32 prot_id;
@@ -3380,6 +3407,8 @@ static struct dentry *scmi_debugfs_init(void)
static int __init scmi_driver_init(void)
{
+ scmi_quirks_initialize();
+
/* Bail out if no SCMI transport was configured */
if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT)))
return -EINVAL;
diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h
index aaee57cdcd55..d62c4469d1fd 100644
--- a/drivers/firmware/arm_scmi/protocols.h
+++ b/drivers/firmware/arm_scmi/protocols.h
@@ -31,6 +31,8 @@
#define SCMI_PROTOCOL_VENDOR_BASE 0x80
+#define MSG_SUPPORTS_FASTCHANNEL(x) ((x) & BIT(0))
+
enum scmi_common_cmd {
PROTOCOL_VERSION = 0x0,
PROTOCOL_ATTRIBUTES = 0x1,
diff --git a/drivers/firmware/arm_scmi/quirks.c b/drivers/firmware/arm_scmi/quirks.c
new file mode 100644
index 000000000000..03960aca3610
--- /dev/null
+++ b/drivers/firmware/arm_scmi/quirks.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Message Protocol Quirks
+ *
+ * Copyright (C) 2025 ARM Ltd.
+ */
+
+/**
+ * DOC: Theory of operation
+ *
+ * A framework to define SCMI quirks and their activation conditions based on
+ * existing static_keys kernel facilities.
+ *
+ * Quirks are named and their activation conditions defined using the macro
+ * DEFINE_SCMI_QUIRK() in this file.
+ *
+ * After a quirk is defined, a corresponding entry must also be added to the
+ * global @scmi_quirks_table in this file using __DECLARE_SCMI_QUIRK_ENTRY().
+ *
+ * Additionally, a corresponding quirk declaration must be added to the
+ * quirks.h file using DECLARE_SCMI_QUIRK().
+ *
+ * The quirk code snippet itself is defined locally to the SCMI code that it
+ * is meant to fix, and is associated with the previously defined quirk and
+ * its activation conditions using the macro SCMI_QUIRK().
+ *
+ * At runtime, during the SCMI stack probe sequence, once the SCMI Server has
+ * advertised the running platform Vendor, SubVendor and Implementation
+ * Version data, all the defined quirks matching the activation conditions
+ * are enabled.
+ *
+ * Example
+ *
+ * quirks.c
+ * --------
+ * DEFINE_SCMI_QUIRK(fix_me, "vendor", "subvend", "0x12000-0x30000",
+ * "someone,plat_A", "another,plat_b", "vend,sku");
+ *
+ * static struct scmi_quirk *scmi_quirks_table[] = {
+ * ...
+ * __DECLARE_SCMI_QUIRK_ENTRY(fix_me),
+ * NULL
+ * };
+ *
+ * quirks.h
+ * --------
+ * DECLARE_SCMI_QUIRK(fix_me);
+ *
+ * <somewhere_in_the_scmi_stack.c>
+ * ------------------------------
+ *
+ * #define QUIRK_CODE_SNIPPET_FIX_ME \
+ * ({ \
+ * if (p->condition) \
+ * a_ptr->calculated_val = 123; \
+ * })
+ *
+ *
+ * int some_function_to_fix(int param, struct something *p)
+ * {
+ *	struct some_struct *a_ptr;
+ *
+ * a_ptr = some_load_func(p);
+ * SCMI_QUIRK(fix_me, QUIRK_CODE_SNIPPET_FIX_ME);
+ * some_more_func(a_ptr);
+ * ...
+ *
+ * return 0;
+ * }
+ *
+ */
+
+#include <linux/ctype.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/hashtable.h>
+#include <linux/kstrtox.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/static_key.h>
+#include <linux/string.h>
+#include <linux/stringhash.h>
+#include <linux/types.h>
+
+#include "quirks.h"
+
+#define SCMI_QUIRKS_HT_SZ 4
+
+struct scmi_quirk {
+ bool enabled;
+ const char *name;
+ char *vendor;
+ char *sub_vendor_id;
+ char *impl_ver_range;
+ u32 start_range;
+ u32 end_range;
+ struct static_key_false *key;
+ struct hlist_node hash;
+ unsigned int hkey;
+ const char *const compats[];
+};
+
+#define __DEFINE_SCMI_QUIRK_ENTRY(_qn, _ven, _sub, _impl, ...) \
+ static struct scmi_quirk scmi_quirk_entry_ ## _qn = { \
+ .name = __stringify(quirk_ ## _qn), \
+ .vendor = _ven, \
+ .sub_vendor_id = _sub, \
+ .impl_ver_range = _impl, \
+ .key = &(scmi_quirk_ ## _qn), \
+ .compats = { __VA_ARGS__ __VA_OPT__(,) NULL }, \
+ }
+
+#define __DECLARE_SCMI_QUIRK_ENTRY(_qn) (&(scmi_quirk_entry_ ## _qn))
+
+/*
+ * Define a quirk by name and provide the matching tokens where:
+ *
+ * _qn: A string which will be used to build the quirk and the global
+ * static_key names.
+ * _ven : SCMI Vendor ID string match, NULL means any.
+ * _sub : SCMI SubVendor ID string match, NULL means any.
+ * _impl : SCMI Implementation Version string match, NULL means any.
+ * This string can be used to express version ranges which will be
+ * interpreted as follows:
+ *
+ * NULL [0, 0xFFFFFFFF]
+ * "X" [X, X]
+ * "X-" [X, 0xFFFFFFFF]
+ * "-X" [0, X]
+ * "X-Y" [X, Y]
+ *
+ * with X <= Y and <v> in [X, Y] meaning X <= <v> <= Y
+ *
+ * ... : An optional variadic macro argument used to provide a comma-separated
+ *       list of compatible string matches; when no variadic argument is
+ *       provided, ANY compatible will match this quirk.
+ *
+ * This also implicitly defines a properly named global static-key that
+ * will be used to dynamically enable the quirk at initialization time.
+ *
+ * Note that it is possible to associate multiple quirks to the same
+ * matching pattern, if your firmware quality is really astounding :P
+ *
+ * Example:
+ *
+ * Compatibles list NOT provided, so ANY compatible will match:
+ *
+ * DEFINE_SCMI_QUIRK(my_new_issue, "Vend", "SVend", "0x12000-0x30000");
+ *
+ *
+ * A few compatibles provided to match against:
+ *
+ * DEFINE_SCMI_QUIRK(my_new_issue, "Vend", "SVend", "0x12000-0x30000",
+ * "xvend,plat_a", "xvend,plat_b", "xvend,sku_name");
+ */
+#define DEFINE_SCMI_QUIRK(_qn, _ven, _sub, _impl, ...) \
+ DEFINE_STATIC_KEY_FALSE(scmi_quirk_ ## _qn); \
+ __DEFINE_SCMI_QUIRK_ENTRY(_qn, _ven, _sub, _impl, ##__VA_ARGS__)
+
+/*
+ * Same as DEFINE_SCMI_QUIRK but EXPORTED: this is meant to address quirks
+ * that possibly reside in code included in loadable kernel modules, which
+ * need to be able to access the global static keys at runtime to determine
+ * whether the quirk is enabled or not. (see SCMI_QUIRK to understand usage)
+ */
+#define DEFINE_SCMI_QUIRK_EXPORTED(_qn, _ven, _sub, _impl, ...) \
+ DEFINE_STATIC_KEY_FALSE(scmi_quirk_ ## _qn); \
+ EXPORT_SYMBOL_GPL(scmi_quirk_ ## _qn); \
+ __DEFINE_SCMI_QUIRK_ENTRY(_qn, _ven, _sub, _impl, ##__VA_ARGS__)
+
+/* Global Quirks Definitions */
+DEFINE_SCMI_QUIRK(clock_rates_triplet_out_of_spec, NULL, NULL, NULL);
+DEFINE_SCMI_QUIRK(perf_level_get_fc_force, "Qualcomm", NULL, "0x20000-");
+
+/*
+ * Quirks Pointers Array
+ *
+ * This is filled at compile time with the list of pointers to all the
+ * currently defined quirk descriptors.
+ */
+static struct scmi_quirk *scmi_quirks_table[] = {
+ __DECLARE_SCMI_QUIRK_ENTRY(clock_rates_triplet_out_of_spec),
+ __DECLARE_SCMI_QUIRK_ENTRY(perf_level_get_fc_force),
+ NULL
+};
+
+/*
+ * Quirks HashTable
+ *
+ * A hashtable, populated at runtime, containing all the defined quirk
+ * descriptors, hashed by matching pattern.
+ */
+static DEFINE_READ_MOSTLY_HASHTABLE(scmi_quirks_ht, SCMI_QUIRKS_HT_SZ);
+
+static unsigned int scmi_quirk_signature(const char *vend, const char *sub_vend)
+{
+ char *signature, *p;
+ unsigned int hash32;
+ unsigned long hash = 0;
+
+ /* vendor_id/sub_vendor_id guaranteed <= SCMI_SHORT_NAME_MAX_SIZE */
+ signature = kasprintf(GFP_KERNEL, "|%s|%s|", vend ?: "", sub_vend ?: "");
+ if (!signature)
+ return 0;
+
+ pr_debug("SCMI Quirk Signature >>>%s<<<\n", signature);
+
+ p = signature;
+ while (*p)
+ hash = partial_name_hash(tolower(*p++), hash);
+ hash32 = end_name_hash(hash);
+
+ kfree(signature);
+
+ return hash32;
+}
+
+static int scmi_quirk_range_parse(struct scmi_quirk *quirk)
+{
+ const char *last, *first = quirk->impl_ver_range;
+ size_t len;
+ char *sep;
+ int ret;
+
+ quirk->start_range = 0;
+ quirk->end_range = 0xFFFFFFFF;
+ len = quirk->impl_ver_range ? strlen(quirk->impl_ver_range) : 0;
+ if (!len)
+ return 0;
+
+ last = first + len - 1;
+ sep = strchr(quirk->impl_ver_range, '-');
+ if (sep)
+ *sep = '\0';
+
+ if (sep == first) /* -X */
+ ret = kstrtouint(first + 1, 0, &quirk->end_range);
+ else /* X OR X- OR X-y */
+ ret = kstrtouint(first, 0, &quirk->start_range);
+ if (ret)
+ return ret;
+
+ if (!sep)
+ quirk->end_range = quirk->start_range;
+ else if (sep != last) /* x-Y */
+ ret = kstrtouint(sep + 1, 0, &quirk->end_range);
+
+ if (quirk->start_range > quirk->end_range)
+ return -EINVAL;
+
+ return ret;
+}
+
+void scmi_quirks_initialize(void)
+{
+ struct scmi_quirk *quirk;
+ int i;
+
+ for (i = 0, quirk = scmi_quirks_table[0]; quirk;
+ i++, quirk = scmi_quirks_table[i]) {
+ int ret;
+
+ ret = scmi_quirk_range_parse(quirk);
+ if (ret) {
+ pr_err("SCMI skip QUIRK [%s] - BAD RANGE - |%s|\n",
+ quirk->name, quirk->impl_ver_range);
+ continue;
+ }
+ quirk->hkey = scmi_quirk_signature(quirk->vendor,
+ quirk->sub_vendor_id);
+
+ hash_add(scmi_quirks_ht, &quirk->hash, quirk->hkey);
+
+ pr_debug("Registered SCMI QUIRK [%s] -- %p - Key [0x%08X] - %s/%s/[0x%08X-0x%08X]\n",
+ quirk->name, quirk, quirk->hkey,
+ quirk->vendor, quirk->sub_vendor_id,
+ quirk->start_range, quirk->end_range);
+ }
+
+ pr_debug("SCMI Quirks initialized\n");
+}
+
+void scmi_quirks_enable(struct device *dev, const char *vend,
+ const char *subv, const u32 impl)
+{
+ for (int i = 3; i >= 0; i--) {
+ struct scmi_quirk *quirk;
+ unsigned int hkey;
+
+ hkey = scmi_quirk_signature(i > 1 ? vend : NULL,
+ i > 2 ? subv : NULL);
+
+ /*
+		 * Note that there could be multiple matches, so we may
+		 * enable multiple quirks that are part of the same hash
+		 * collision domain...BUT we cannot assume that ALL quirks
+		 * in the same collision domain are a full match.
+ */
+ hash_for_each_possible(scmi_quirks_ht, quirk, hash, hkey) {
+ if (quirk->enabled || quirk->hkey != hkey ||
+ impl < quirk->start_range ||
+ impl > quirk->end_range)
+ continue;
+
+ if (quirk->compats[0] &&
+ !of_machine_compatible_match(quirk->compats))
+ continue;
+
+ dev_info(dev, "Enabling SCMI Quirk [%s]\n",
+ quirk->name);
+
+ dev_dbg(dev,
+ "Quirk matched on: %s/%s/%s/[0x%08X-0x%08X]\n",
+ quirk->compats[0], quirk->vendor,
+ quirk->sub_vendor_id,
+ quirk->start_range, quirk->end_range);
+
+ static_branch_enable(quirk->key);
+ quirk->enabled = true;
+ }
+ }
+}
diff --git a/drivers/firmware/arm_scmi/quirks.h b/drivers/firmware/arm_scmi/quirks.h
new file mode 100644
index 000000000000..a71fde85a527
--- /dev/null
+++ b/drivers/firmware/arm_scmi/quirks.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * System Control and Management Interface (SCMI) Message Protocol Quirks
+ *
+ * Copyright (C) 2025 ARM Ltd.
+ */
+#ifndef _SCMI_QUIRKS_H
+#define _SCMI_QUIRKS_H
+
+#include <linux/static_key.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_ARM_SCMI_QUIRKS
+
+#define DECLARE_SCMI_QUIRK(_qn) \
+ DECLARE_STATIC_KEY_FALSE(scmi_quirk_ ## _qn)
+
+/*
+ * A helper to associate the actual code snippet to use as a quirk
+ * named as _qn.
+ */
+#define SCMI_QUIRK(_qn, _blk) \
+ do { \
+ if (static_branch_unlikely(&(scmi_quirk_ ## _qn))) \
+ (_blk); \
+ } while (0)
+
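+/*
+ * Minimal usage sketch (illustrative only: QUIRK_FIXUP and fix_the_reply()
+ * are hypothetical). The fixup body is evaluated only when the named quirk's
+ * static key has been enabled at probe time:
+ *
+ *	#define QUIRK_FIXUP				\
+ *		({					\
+ *			fix_the_reply(r);		\
+ *		})
+ *
+ *	SCMI_QUIRK(clock_rates_triplet_out_of_spec, QUIRK_FIXUP);
+ */
+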
+void scmi_quirks_initialize(void);
+void scmi_quirks_enable(struct device *dev, const char *vend,
+ const char *subv, const u32 impl);
+
+#else
+
+#define DECLARE_SCMI_QUIRK(_qn)
+/* Force quirks compilation even when SCMI Quirks are disabled */
+#define SCMI_QUIRK(_qn, _blk) \
+ do { \
+ if (0) \
+ (_blk); \
+ } while (0)
+
+static inline void scmi_quirks_initialize(void) { }
+static inline void scmi_quirks_enable(struct device *dev, const char *vend,
+ const char *sub_vend, const u32 impl) { }
+
+#endif /* CONFIG_ARM_SCMI_QUIRKS */
+
+/* Quirk declarations */
+DECLARE_SCMI_QUIRK(clock_rates_triplet_out_of_spec);
+DECLARE_SCMI_QUIRK(perf_level_get_fc_force);
+
+#endif /* _SCMI_QUIRKS_H */
diff --git a/drivers/firmware/arm_scmi/raw_mode.c b/drivers/firmware/arm_scmi/raw_mode.c
index 7cc0d616b8de..3d543b1d8947 100644
--- a/drivers/firmware/arm_scmi/raw_mode.c
+++ b/drivers/firmware/arm_scmi/raw_mode.c
@@ -671,11 +671,13 @@ static int scmi_do_xfer_raw_start(struct scmi_raw_mode_info *raw,
* @len: Length of the message in @buf.
* @chan_id: The channel ID to use.
* @async: A flag stating if an asynchronous command is required.
+ * @poll: A flag stating if a polling transmission is required.
*
* Return: 0 on Success
*/
static int scmi_raw_message_send(struct scmi_raw_mode_info *raw,
- void *buf, size_t len, u8 chan_id, bool async)
+ void *buf, size_t len, u8 chan_id,
+ bool async, bool poll)
{
int ret;
struct scmi_xfer *xfer;
@@ -684,6 +686,16 @@ static int scmi_raw_message_send(struct scmi_raw_mode_info *raw,
if (ret)
return ret;
+ if (poll) {
+ if (is_transport_polling_capable(raw->desc)) {
+ xfer->hdr.poll_completion = true;
+ } else {
+ dev_err(raw->handle->dev,
+ "Failed to send RAW message - Polling NOT supported\n");
+ return -EINVAL;
+ }
+ }
+
ret = scmi_do_xfer_raw_start(raw, xfer, chan_id, async);
if (ret)
scmi_xfer_raw_put(raw->handle, xfer);
@@ -801,7 +813,7 @@ static ssize_t scmi_dbg_raw_mode_common_read(struct file *filp,
static ssize_t scmi_dbg_raw_mode_common_write(struct file *filp,
const char __user *buf,
size_t count, loff_t *ppos,
- bool async)
+ bool async, bool poll)
{
int ret;
struct scmi_dbg_raw_data *rd = filp->private_data;
@@ -831,7 +843,7 @@ static ssize_t scmi_dbg_raw_mode_common_write(struct file *filp,
}
ret = scmi_raw_message_send(rd->raw, rd->tx.buf, rd->tx_size,
- rd->chan_id, async);
+ rd->chan_id, async, poll);
/* Reset ppos for next message ... */
rd->tx_size = 0;
@@ -875,7 +887,8 @@ static ssize_t scmi_dbg_raw_mode_message_write(struct file *filp,
const char __user *buf,
size_t count, loff_t *ppos)
{
- return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, false);
+ return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos,
+ false, false);
}
static __poll_t scmi_dbg_raw_mode_message_poll(struct file *filp,
@@ -964,7 +977,8 @@ static ssize_t scmi_dbg_raw_mode_message_async_write(struct file *filp,
const char __user *buf,
size_t count, loff_t *ppos)
{
- return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, true);
+ return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos,
+ true, false);
}
static const struct file_operations scmi_dbg_raw_mode_message_async_fops = {
@@ -976,6 +990,40 @@ static const struct file_operations scmi_dbg_raw_mode_message_async_fops = {
.owner = THIS_MODULE,
};
+static ssize_t scmi_dbg_raw_mode_message_poll_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos,
+ false, true);
+}
+
+static const struct file_operations scmi_dbg_raw_mode_message_poll_fops = {
+ .open = scmi_dbg_raw_mode_open,
+ .release = scmi_dbg_raw_mode_release,
+ .read = scmi_dbg_raw_mode_message_read,
+ .write = scmi_dbg_raw_mode_message_poll_write,
+ .poll = scmi_dbg_raw_mode_message_poll,
+ .owner = THIS_MODULE,
+};
+
+static ssize_t scmi_dbg_raw_mode_message_poll_async_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos,
+ true, true);
+}
+
+static const struct file_operations scmi_dbg_raw_mode_message_poll_async_fops = {
+ .open = scmi_dbg_raw_mode_open,
+ .release = scmi_dbg_raw_mode_release,
+ .read = scmi_dbg_raw_mode_message_read,
+ .write = scmi_dbg_raw_mode_message_poll_async_write,
+ .poll = scmi_dbg_raw_mode_message_poll,
+ .owner = THIS_MODULE,
+};
+
static ssize_t scmi_test_dbg_raw_mode_notif_read(struct file *filp,
char __user *buf,
size_t count, loff_t *ppos)
@@ -1199,6 +1247,12 @@ void *scmi_raw_mode_init(const struct scmi_handle *handle,
debugfs_create_file("message_async", 0600, raw->dentry, raw,
&scmi_dbg_raw_mode_message_async_fops);
+ debugfs_create_file("message_poll", 0600, raw->dentry, raw,
+ &scmi_dbg_raw_mode_message_poll_fops);
+
+ debugfs_create_file("message_poll_async", 0600, raw->dentry, raw,
+ &scmi_dbg_raw_mode_message_poll_async_fops);
+
debugfs_create_file("notification", 0400, raw->dentry, raw,
&scmi_dbg_raw_mode_notification_fops);
@@ -1230,6 +1284,14 @@ void *scmi_raw_mode_init(const struct scmi_handle *handle,
debugfs_create_file_aux_num("message_async", 0600, chd,
raw, channels[i],
&scmi_dbg_raw_mode_message_async_fops);
+
+ debugfs_create_file_aux_num("message_poll", 0600, chd,
+ raw, channels[i],
+ &scmi_dbg_raw_mode_message_poll_fops);
+
+ debugfs_create_file_aux_num("message_poll_async", 0600,
+ chd, raw, channels[i],
+ &scmi_dbg_raw_mode_message_poll_async_fops);
}
}
diff --git a/drivers/firmware/arm_scmi/vendors/imx/Kconfig b/drivers/firmware/arm_scmi/vendors/imx/Kconfig
index a01bf5e47301..c34c8c837441 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/Kconfig
+++ b/drivers/firmware/arm_scmi/vendors/imx/Kconfig
@@ -12,6 +12,30 @@ config IMX_SCMI_BBM_EXT
To compile this driver as a module, choose M here: the
module will be called imx-sm-bbm.
+config IMX_SCMI_CPU_EXT
+ tristate "i.MX SCMI CPU EXTENSION"
+ depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
+ depends on IMX_SCMI_CPU_DRV
+ default y if ARCH_MXC
+	  This enables the i.MX System CPU Protocol, used to manage CPU
+	  start, stop and related operations.
+ start, stop and etc.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx-sm-cpu.
+
+config IMX_SCMI_LMM_EXT
+ tristate "i.MX SCMI LMM EXTENSION"
+ depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
+ depends on IMX_SCMI_LMM_DRV
+ default y if ARCH_MXC
+ help
+	  This enables the i.MX System Logical Machine Protocol, used to
+	  manage Logical Machine boot, shutdown and related operations.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx-sm-lmm.
+
config IMX_SCMI_MISC_EXT
tristate "i.MX SCMI MISC EXTENSION"
depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
diff --git a/drivers/firmware/arm_scmi/vendors/imx/Makefile b/drivers/firmware/arm_scmi/vendors/imx/Makefile
index d3ee6d544924..e3a5ea46345c 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/Makefile
+++ b/drivers/firmware/arm_scmi/vendors/imx/Makefile
@@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_IMX_SCMI_BBM_EXT) += imx-sm-bbm.o
+obj-$(CONFIG_IMX_SCMI_CPU_EXT) += imx-sm-cpu.o
+obj-$(CONFIG_IMX_SCMI_LMM_EXT) += imx-sm-lmm.o
obj-$(CONFIG_IMX_SCMI_MISC_EXT) += imx-sm-misc.o
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-cpu.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-cpu.c
new file mode 100644
index 000000000000..66f47f5371e5
--- /dev/null
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-cpu.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) NXP CPU Protocol
+ *
+ * Copyright 2025 NXP
+ */
+
+#include <linux/bits.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/scmi_protocol.h>
+#include <linux/scmi_imx_protocol.h>
+
+#include "../../protocols.h"
+#include "../../notify.h"
+
+#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x10000
+
+enum scmi_imx_cpu_protocol_cmd {
+ SCMI_IMX_CPU_ATTRIBUTES = 0x3,
+ SCMI_IMX_CPU_START = 0x4,
+ SCMI_IMX_CPU_STOP = 0x5,
+ SCMI_IMX_CPU_RESET_VECTOR_SET = 0x6,
+ SCMI_IMX_CPU_INFO_GET = 0xC,
+};
+
+struct scmi_imx_cpu_info {
+ u32 nr_cpu;
+};
+
+#define SCMI_IMX_CPU_NR_CPU_MASK GENMASK(15, 0)
+struct scmi_msg_imx_cpu_protocol_attributes {
+ __le32 attributes;
+};
+
+struct scmi_msg_imx_cpu_attributes_out {
+ __le32 attributes;
+#define CPU_MAX_NAME 16
+ u8 name[CPU_MAX_NAME];
+};
+
+struct scmi_imx_cpu_reset_vector_set_in {
+ __le32 cpuid;
+#define CPU_VEC_FLAGS_RESUME BIT(31)
+#define CPU_VEC_FLAGS_START BIT(30)
+#define CPU_VEC_FLAGS_BOOT BIT(29)
+ __le32 flags;
+ __le32 resetvectorlow;
+ __le32 resetvectorhigh;
+};
+
+struct scmi_imx_cpu_info_get_out {
+#define CPU_RUN_MODE_START 0
+#define CPU_RUN_MODE_HOLD 1
+#define CPU_RUN_MODE_STOP 2
+#define CPU_RUN_MODE_SLEEP 3
+ __le32 runmode;
+ __le32 sleepmode;
+ __le32 resetvectorlow;
+ __le32 resetvectorhigh;
+};
+
+static int scmi_imx_cpu_validate_cpuid(const struct scmi_protocol_handle *ph,
+ u32 cpuid)
+{
+ struct scmi_imx_cpu_info *info = ph->get_priv(ph);
+
+ if (cpuid >= info->nr_cpu)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int scmi_imx_cpu_start(const struct scmi_protocol_handle *ph,
+ u32 cpuid, bool start)
+{
+ struct scmi_xfer *t;
+ u8 msg_id;
+ int ret;
+
+ ret = scmi_imx_cpu_validate_cpuid(ph, cpuid);
+ if (ret)
+ return ret;
+
+ if (start)
+ msg_id = SCMI_IMX_CPU_START;
+ else
+ msg_id = SCMI_IMX_CPU_STOP;
+
+ ret = ph->xops->xfer_get_init(ph, msg_id, sizeof(u32), 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(cpuid, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_cpu_reset_vector_set(const struct scmi_protocol_handle *ph,
+ u32 cpuid, u64 vector, bool start,
+ bool boot, bool resume)
+{
+ struct scmi_imx_cpu_reset_vector_set_in *in;
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = scmi_imx_cpu_validate_cpuid(ph, cpuid);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_CPU_RESET_VECTOR_SET, sizeof(*in),
+ 0, &t);
+ if (ret)
+ return ret;
+
+ in = t->tx.buf;
+ in->cpuid = cpu_to_le32(cpuid);
+ in->flags = cpu_to_le32(0);
+ if (start)
+ in->flags |= le32_encode_bits(1, CPU_VEC_FLAGS_START);
+ if (boot)
+ in->flags |= le32_encode_bits(1, CPU_VEC_FLAGS_BOOT);
+ if (resume)
+ in->flags |= le32_encode_bits(1, CPU_VEC_FLAGS_RESUME);
+ in->resetvectorlow = cpu_to_le32(lower_32_bits(vector));
+ in->resetvectorhigh = cpu_to_le32(upper_32_bits(vector));
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_cpu_started(const struct scmi_protocol_handle *ph, u32 cpuid,
+ bool *started)
+{
+ struct scmi_imx_cpu_info_get_out *out;
+ struct scmi_xfer *t;
+ u32 mode;
+ int ret;
+
+ if (!started)
+ return -EINVAL;
+
+ *started = false;
+ ret = scmi_imx_cpu_validate_cpuid(ph, cpuid);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_CPU_INFO_GET, sizeof(u32),
+ 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(cpuid, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ out = t->rx.buf;
+ mode = le32_to_cpu(out->runmode);
+ if (mode == CPU_RUN_MODE_START || mode == CPU_RUN_MODE_SLEEP)
+ *started = true;
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static const struct scmi_imx_cpu_proto_ops scmi_imx_cpu_proto_ops = {
+ .cpu_reset_vector_set = scmi_imx_cpu_reset_vector_set,
+ .cpu_start = scmi_imx_cpu_start,
+ .cpu_started = scmi_imx_cpu_started,
+};
+
+static int scmi_imx_cpu_protocol_attributes_get(const struct scmi_protocol_handle *ph,
+ struct scmi_imx_cpu_info *info)
+{
+ struct scmi_msg_imx_cpu_protocol_attributes *attr;
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
+ sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ attr = t->rx.buf;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ info->nr_cpu = le32_get_bits(attr->attributes, SCMI_IMX_CPU_NR_CPU_MASK);
+ dev_info(ph->dev, "i.MX SM CPU: %d cpus\n",
+ info->nr_cpu);
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_cpu_attributes_get(const struct scmi_protocol_handle *ph,
+ u32 cpuid)
+{
+ struct scmi_msg_imx_cpu_attributes_out *out;
+ char name[SCMI_SHORT_NAME_MAX_SIZE] = {'\0'};
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_CPU_ATTRIBUTES, sizeof(u32), 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(cpuid, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ out = t->rx.buf;
+ strscpy(name, out->name, SCMI_SHORT_NAME_MAX_SIZE);
+ dev_info(ph->dev, "i.MX CPU: name: %s\n", name);
+ } else {
+ dev_err(ph->dev, "i.MX cpu: Failed to get info of cpu(%u)\n", cpuid);
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_cpu_protocol_init(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_imx_cpu_info *info;
+ u32 version;
+ int ret, i;
+
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
+
+ dev_info(ph->dev, "NXP SM CPU Protocol Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ info = devm_kzalloc(ph->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ ret = scmi_imx_cpu_protocol_attributes_get(ph, info);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < info->nr_cpu; i++) {
+ ret = scmi_imx_cpu_attributes_get(ph, i);
+ if (ret)
+ return ret;
+ }
+
+ return ph->set_priv(ph, info, version);
+}
+
+static const struct scmi_protocol scmi_imx_cpu = {
+ .id = SCMI_PROTOCOL_IMX_CPU,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_imx_cpu_protocol_init,
+ .ops = &scmi_imx_cpu_proto_ops,
+ .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
+ .vendor_id = SCMI_IMX_VENDOR,
+ .sub_vendor_id = SCMI_IMX_SUBVENDOR,
+};
+module_scmi_protocol(scmi_imx_cpu);
+
+MODULE_ALIAS("scmi-protocol-" __stringify(SCMI_PROTOCOL_IMX_CPU) "-" SCMI_IMX_VENDOR);
+MODULE_DESCRIPTION("i.MX SCMI CPU driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-lmm.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-lmm.c
new file mode 100644
index 000000000000..b519c67fe920
--- /dev/null
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-lmm.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) NXP LMM Protocol
+ *
+ * Copyright 2025 NXP
+ */
+
+#include <linux/bits.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/scmi_protocol.h>
+#include <linux/scmi_imx_protocol.h>
+
+#include "../../protocols.h"
+#include "../../notify.h"
+
+#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x10000
+
+enum scmi_imx_lmm_protocol_cmd {
+ SCMI_IMX_LMM_ATTRIBUTES = 0x3,
+ SCMI_IMX_LMM_BOOT = 0x4,
+ SCMI_IMX_LMM_RESET = 0x5,
+ SCMI_IMX_LMM_SHUTDOWN = 0x6,
+ SCMI_IMX_LMM_WAKE = 0x7,
+ SCMI_IMX_LMM_SUSPEND = 0x8,
+ SCMI_IMX_LMM_NOTIFY = 0x9,
+ SCMI_IMX_LMM_RESET_REASON = 0xA,
+ SCMI_IMX_LMM_POWER_ON = 0xB,
+ SCMI_IMX_LMM_RESET_VECTOR_SET = 0xC,
+};
+
+struct scmi_imx_lmm_priv {
+ u32 nr_lmm;
+};
+
+#define SCMI_IMX_LMM_NR_LM_MASK GENMASK(5, 0)
+#define SCMI_IMX_LMM_NR_MAX 16
+struct scmi_msg_imx_lmm_protocol_attributes {
+ __le32 attributes;
+};
+
+struct scmi_msg_imx_lmm_attributes_out {
+ __le32 lmid;
+ __le32 attributes;
+ __le32 state;
+ __le32 errstatus;
+ u8 name[LMM_MAX_NAME];
+};
+
+struct scmi_imx_lmm_reset_vector_set_in {
+ __le32 lmid;
+ __le32 cpuid;
+ __le32 flags; /* reserved for future extension */
+ __le32 resetvectorlow;
+ __le32 resetvectorhigh;
+};
+
+struct scmi_imx_lmm_shutdown_in {
+ __le32 lmid;
+#define SCMI_IMX_LMM_SHUTDOWN_GRACEFUL BIT(0)
+ __le32 flags;
+};
+
+static int scmi_imx_lmm_validate_lmid(const struct scmi_protocol_handle *ph, u32 lmid)
+{
+ struct scmi_imx_lmm_priv *priv = ph->get_priv(ph);
+
+ if (lmid >= priv->nr_lmm)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int scmi_imx_lmm_attributes(const struct scmi_protocol_handle *ph,
+ u32 lmid, struct scmi_imx_lmm_info *info)
+{
+ struct scmi_msg_imx_lmm_attributes_out *out;
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_LMM_ATTRIBUTES, sizeof(u32), 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(lmid, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ out = t->rx.buf;
+ info->lmid = le32_to_cpu(out->lmid);
+ info->state = le32_to_cpu(out->state);
+ info->errstatus = le32_to_cpu(out->errstatus);
+ strscpy(info->name, out->name);
+ dev_dbg(ph->dev, "i.MX LMM: Logical Machine(%d), name: %s\n",
+ info->lmid, info->name);
+ } else {
+ dev_err(ph->dev, "i.MX LMM: Failed to get info of Logical Machine(%u)\n", lmid);
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int
+scmi_imx_lmm_power_boot(const struct scmi_protocol_handle *ph, u32 lmid, bool boot)
+{
+ struct scmi_xfer *t;
+ u8 msg_id;
+ int ret;
+
+ ret = scmi_imx_lmm_validate_lmid(ph, lmid);
+ if (ret)
+ return ret;
+
+ if (boot)
+ msg_id = SCMI_IMX_LMM_BOOT;
+ else
+ msg_id = SCMI_IMX_LMM_POWER_ON;
+
+ ret = ph->xops->xfer_get_init(ph, msg_id, sizeof(u32), 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(lmid, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_lmm_reset_vector_set(const struct scmi_protocol_handle *ph,
+ u32 lmid, u32 cpuid, u32 flags, u64 vector)
+{
+ struct scmi_imx_lmm_reset_vector_set_in *in;
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_LMM_RESET_VECTOR_SET, sizeof(*in),
+ 0, &t);
+ if (ret)
+ return ret;
+
+ in = t->tx.buf;
+ in->lmid = cpu_to_le32(lmid);
+ in->cpuid = cpu_to_le32(cpuid);
+ in->flags = cpu_to_le32(0);
+ in->resetvectorlow = cpu_to_le32(lower_32_bits(vector));
+ in->resetvectorhigh = cpu_to_le32(upper_32_bits(vector));
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_lmm_shutdown(const struct scmi_protocol_handle *ph, u32 lmid,
+ u32 flags)
+{
+ struct scmi_imx_lmm_shutdown_in *in;
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = scmi_imx_lmm_validate_lmid(ph, lmid);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_LMM_SHUTDOWN, sizeof(*in),
+ 0, &t);
+ if (ret)
+ return ret;
+
+ in = t->tx.buf;
+ in->lmid = cpu_to_le32(lmid);
+ if (flags & SCMI_IMX_LMM_SHUTDOWN_GRACEFUL)
+ in->flags = cpu_to_le32(SCMI_IMX_LMM_SHUTDOWN_GRACEFUL);
+ else
+ in->flags = cpu_to_le32(0);
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static const struct scmi_imx_lmm_proto_ops scmi_imx_lmm_proto_ops = {
+ .lmm_power_boot = scmi_imx_lmm_power_boot,
+ .lmm_info = scmi_imx_lmm_attributes,
+ .lmm_reset_vector_set = scmi_imx_lmm_reset_vector_set,
+ .lmm_shutdown = scmi_imx_lmm_shutdown,
+};
+
+static int scmi_imx_lmm_protocol_attributes_get(const struct scmi_protocol_handle *ph,
+ struct scmi_imx_lmm_priv *priv)
+{
+ struct scmi_msg_imx_lmm_protocol_attributes *attr;
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
+ sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ attr = t->rx.buf;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ priv->nr_lmm = le32_get_bits(attr->attributes, SCMI_IMX_LMM_NR_LM_MASK);
+ if (priv->nr_lmm > SCMI_IMX_LMM_NR_MAX) {
+ dev_err(ph->dev, "i.MX LMM: %d:Exceed max supported Logical Machines\n",
+ priv->nr_lmm);
+ ret = -EINVAL;
+ } else {
+ dev_info(ph->dev, "i.MX LMM: %d Logical Machines\n", priv->nr_lmm);
+ }
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_lmm_protocol_init(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_imx_lmm_priv *info;
+ u32 version;
+ int ret;
+
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
+
+ dev_info(ph->dev, "NXP SM LMM Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ info = devm_kzalloc(ph->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ ret = scmi_imx_lmm_protocol_attributes_get(ph, info);
+ if (ret)
+ return ret;
+
+ return ph->set_priv(ph, info, version);
+}
+
+static const struct scmi_protocol scmi_imx_lmm = {
+ .id = SCMI_PROTOCOL_IMX_LMM,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_imx_lmm_protocol_init,
+ .ops = &scmi_imx_lmm_proto_ops,
+ .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
+ .vendor_id = SCMI_IMX_VENDOR,
+ .sub_vendor_id = SCMI_IMX_SUBVENDOR,
+};
+module_scmi_protocol(scmi_imx_lmm);
+
+MODULE_ALIAS("scmi-protocol-" __stringify(SCMI_PROTOCOL_IMX_LMM) "-" SCMI_IMX_VENDOR);
+MODULE_DESCRIPTION("i.MX SCMI LMM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx95.rst b/drivers/firmware/arm_scmi/vendors/imx/imx95.rst
index b2dfd6c46ca2..4e246a78a042 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/imx95.rst
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx95.rst
@@ -32,6 +32,518 @@ port, and deploy the SM on supported processors.
The SM implements an interface compliant with the Arm SCMI Specification
with additional vendor specific extensions.
+System Control and Management Logical Machine Management Vendor Protocol
+========================================================================
+
+The SM adds the concept of logical machines (LMs). These are analogous to
+VMs and each has its own instance of SCMI. All normal SCMI calls only apply
+to the LM running the calling agent. That includes boot, shutdown, reset,
+suspend, wake, etc. If a caller makes the SCMI base call to get a list
+of agents, it will only get those on that LM. Each LM is completely isolated
+from the others. This isolation is mandatory for the LMs to operate
+independently.
+
+This protocol is intended to support boot, shutdown, and reset of other logical
+machines (LMs). It is usually used to allow one LM (e.g. the OSPM) to manage
+another LM, which is typically an offload or accelerator engine. Notifications
+from this protocol can also be used to manage a communication link to another
+LM. The LMM protocol provides commands to:
+
+- Describe the protocol version.
+- Discover implementation attributes.
+- Discover all the LMs defined in the system.
+- Boot a target LM.
+- Shutdown a target LM (gracefully or forcibly).
+- Reset a target LM (gracefully or forcibly).
+- Wake a target LM from suspend.
+- Suspend a target LM (gracefully).
+- Read boot/shutdown/reset information for a target LM.
+- Get notifications when a target LM boots or shuts down (e.g. if LM 'X'
+  requested notifications for LM 'Y', the SCMI firmware will notify LM 'X'
+  whenever LM 'Y' boots or shuts down).
+
+'Graceful' means asking the LM itself to shutdown/reset/etc (e.g. a
+notification is sent to Linux, and Linux then reboots or powers itself
+down). It is an asynchronous command: SUCCESS only means that the command
+returned successfully, not that the reboot/reset has actually completed.
+
+'Forceful' means the SM will forcibly shutdown/reset/etc the LM. It is a
+synchronous command: SUCCESS means the LM has actually been
+shutdown/reset/etc.
+
+Commands that have no Graceful/Forceful flag, such as WAKE and SUSPEND,
+are Graceful commands.
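+
+As an illustration only (not part of the protocol definition), a kernel
+consumer holding the vendor protocol operations added by this series could
+request a graceful shutdown roughly as follows, where ``ph`` and ``ops`` are
+assumed to have been obtained when the LMM protocol was probed::
+
+	/*
+	 * Ask LM 3 to shut itself down gracefully. As a Graceful
+	 * (asynchronous) request, success only means the request was
+	 * accepted, not that the LM has actually stopped.
+	 */
+	ret = ops->lmm_shutdown(ph, 3, SCMI_IMX_LMM_SHUTDOWN_GRACEFUL);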
+
+Commands:
+_________
+
+PROTOCOL_VERSION
+~~~~~~~~~~~~~~~~
+
+message_id: 0x0
+protocol_id: 0x80
+This command is mandatory.
+
++---------------+--------------------------------------------------------------+
+|Return values |
++---------------+--------------------------------------------------------------+
+|Name |Description |
++---------------+--------------------------------------------------------------+
+|int32 status | See ARM SCMI Specification for status code definitions. |
++---------------+--------------------------------------------------------------+
+|uint32 version | For this revision of the specification, this value must be |
+| | 0x10000. |
++---------------+--------------------------------------------------------------+
+
+PROTOCOL_ATTRIBUTES
+~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x1
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status | See ARM SCMI Specification for status code definitions. |
++------------------+-----------------------------------------------------------+
+|uint32 attributes |Protocol attributes: |
+| |Bits[31:5] Reserved, must be zero. |
+| |Bits[4:0] Number of Logical Machines |
+| |Note that due to both hardware limitations and reset reason|
+| |field limitations, the max number of LM is 16. The minimum |
+| |is 1. |
++------------------+-----------------------------------------------------------+
+
+PROTOCOL_MESSAGE_ATTRIBUTES
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x2
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: in case the message is implemented and available |
+| |to use. |
+| |NOT_FOUND: if the message identified by message_id is |
+| |invalid or not implemented |
++------------------+-----------------------------------------------------------+
+|uint32 attributes |Flags that are associated with a specific command in the |
+| |protocol. For all commands in this protocol, this |
+| |parameter has a value of 0 |
++------------------+-----------------------------------------------------------+
+
+LMM_ATTRIBUTES
+~~~~~~~~~~~~~~
+
+message_id: 0x3
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if valid attributes are returned. |
+|                  |NOT_FOUND: if lmid is not a valid logical machine.         |
+| |DENIED: if the agent does not have permission to get info |
+| |for the LM specified by lmid. |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |Identifier of the LM whose identification is requested. |
+| |This field is: Populated with the lmid of the calling |
+| |agent, when the lmid parameter passed via the command is |
+| |0xFFFFFFFF. Identical to the lmid field passed via the |
+| |calling parameters, in all other cases |
++------------------+-----------------------------------------------------------+
+|uint32 attributes | Bits[31:0] Reserved, must be zero                         |
++------------------+-----------------------------------------------------------+
+|uint32 state | Current state of the LM |
++------------------+-----------------------------------------------------------+
+|uint32 errStatus | Last error status recorded |
++------------------+-----------------------------------------------------------+
+|char name[16] | A NULL terminated ASCII string with the LM name, of up |
+| | to 16 bytes |
++------------------+-----------------------------------------------------------+
+
+LMM_BOOT
+~~~~~~~~
+
+message_id: 0x4
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status      |SUCCESS: if the LM boot was started successfully.          |
+|                  |NOT_FOUND: if lmid is not a valid logical machine.         |
+|                  |INVALID_PARAMETERS: if lmid is the caller's own LM.        |
+|                  |DENIED: if the agent does not have permission to manage the|
+|                  |LM specified by lmid.                                      |
++------------------+-----------------------------------------------------------+
+
+LMM_RESET
+~~~~~~~~~
+
+message_id: 0x5
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|uint32 flags |Reset flags: |
+| |Bits[31:1] Reserved, must be zero. |
+| |Bit[0] Graceful request: |
+| |Set to 1 if the request is a graceful request. |
+| |Set to 0 if the request is a forceful request. |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status      |SUCCESS: the command returned successfully (graceful       |
+|                  |reset) or the LM was successfully reset (forceful reset).  |
+|                  |NOT_FOUND: if lmid is not a valid logical machine.         |
+|                  |INVALID_PARAMETERS: if lmid is the caller's own LM.        |
+|                  |DENIED: if the agent does not have permission to manage the|
+|                  |LM specified by lmid.                                      |
++------------------+-----------------------------------------------------------+
+
+LMM_SHUTDOWN
+~~~~~~~~~~~~
+
+message_id: 0x6
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|uint32 flags      |Shutdown flags:                                            |
+| |Bits[31:1] Reserved, must be zero. |
+| |Bit[0] Graceful request: |
+| |Set to 1 if the request is a graceful request. |
+| |Set to 0 if the request is a forceful request. |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status      |SUCCESS: the command returned successfully (graceful       |
+|                  |request) or the LM was successfully shut down (forceful    |
+|                  |request).                                                  |
+|                  |NOT_FOUND: if lmid is not a valid logical machine.         |
+|                  |INVALID_PARAMETERS: if lmid is the caller's own LM.        |
+|                  |DENIED: if the agent does not have permission to manage the|
+|                  |LM specified by lmid.                                      |
++------------------+-----------------------------------------------------------+
+
+LMM_WAKE
+~~~~~~~~
+
+message_id: 0x7
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status      |SUCCESS: if the wake command returns successfully.         |
+|                  |NOT_FOUND: if lmid is not a valid logical machine.         |
+|                  |INVALID_PARAMETERS: if lmid is the caller's own LM.        |
+|                  |DENIED: if the agent does not have permission to manage the|
+|                  |LM specified by lmid.                                      |
++------------------+-----------------------------------------------------------+
+
+LMM_SUSPEND
+~~~~~~~~~~~
+
+message_id: 0x8
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status      |SUCCESS: if the suspend command returns successfully.      |
+|                  |NOT_FOUND: if lmid is not a valid logical machine.         |
+|                  |INVALID_PARAMETERS: if lmid is the caller's own LM.        |
+|                  |DENIED: if the agent does not have permission to manage the|
+|                  |LM specified by lmid.                                      |
++------------------+-----------------------------------------------------------+
+
+LMM_NOTIFY
+~~~~~~~~~~
+
+message_id: 0x9
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|uint32 flags |Notification flags: |
+|                  |Bits[31:4] Reserved, must be zero.                         |
+| |Bit[3] Wake (resume) notification: |
+| |Set to 1 to send notification. |
+| |Set to 0 if no notification. |
+| |Bit[2] Suspend (sleep) notification: |
+| |Set to 1 to send notification. |
+| |Set to 0 if no notification. |
+| |Bit[1] Shutdown (off) notification: |
+| |Set to 1 to send notification. |
+| |Set to 0 if no notification. |
+| |Bit[0] Boot (on) notification: |
+| |Set to 1 to send notification. |
+| |Set to 0 if no notification |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status      |SUCCESS: if the notification state was updated.            |
+|                  |NOT_FOUND: if lmid is not a valid logical machine.         |
+|                  |INVALID_PARAMETERS: if the flags specify unsupported or    |
+|                  |invalid configurations.                                    |
+| |DENIED: if the agent does not have permission to request |
+| |the notification. |
++------------------+-----------------------------------------------------------+
+
+LMM_RESET_REASON
+~~~~~~~~~~~~~~~~
+
+message_id: 0xA
+protocol_id: 0x80
+This command is mandatory.
+
+This command returns the reset reason that caused the last reset, such as
+POR, WDOG, or JTAG.
+
++---------------------+--------------------------------------------------------+
+|Parameters |
++---------------------+--------------------------------------------------------+
+|Name |Description |
++---------------------+--------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++---------------------+--------------------------------------------------------+
+|Return values |
++---------------------+--------------------------------------------------------+
+|Name |Description |
++---------------------+--------------------------------------------------------+
+|int32 status         |SUCCESS: if the reset reason of the LM was              |
+|                     |successfully returned.                                  |
+|                     |NOT_FOUND: if lmid is not a valid logical machine.      |
+|                     |DENIED: if the agent does not have permission to request|
+|                     |the reset reason.                                       |
++---------------------+--------------------------------------------------------+
+|uint32 bootflags     |Boot reason flags. This parameter has the format:       |
+|                     |Bit[31] Valid:                                          |
+|                     |Set to 1 if the entire reason is valid.                 |
+|                     |Set to 0 if the entire reason is not valid.             |
+|                     |Bits[30:29] Reserved, must be zero.                     |
+|                     |Bit[28] Valid origin:                                   |
+|                     |Set to 1 if the origin field is valid.                  |
+|                     |Set to 0 if the origin field is not valid.              |
+|                     |Bits[27:24] Origin:                                     |
+|                     |Logical Machine (LM) ID that caused the boot of this LM |
+|                     |Bit[23] Valid err ID:                                   |
+|                     |Set to 1 if the error ID field is valid.                |
+|                     |Set to 0 if the error ID field is not valid.            |
+|                     |Bits[22:8] Error ID (agent ID of the system).           |
+|                     |Bits[7:0] Reason (WDOG, POR, FCCU, etc.):               |
+|                     |See the SRESR register description in the System        |
+|                     |Reset Controller (SRC) section in SoC reference manual. |
+|                     |One reason maps to BIT(reason) in SRESR.                |
++---------------------+--------------------------------------------------------+
+|uint32 shutdownflags |Shutdown reason flags. This parameter has the format:   |
+|                     |Bit[31] Valid:                                          |
+|                     |Set to 1 if the entire reason is valid.                 |
+|                     |Set to 0 if the entire reason is not valid.             |
+|                     |Bits[30:29] Number of valid extended info words.        |
+|                     |Bit[28] Valid origin:                                   |
+|                     |Set to 1 if the origin field is valid.                  |
+|                     |Set to 0 if the origin field is not valid.              |
+|                     |Bits[27:24] Origin:                                     |
+|                     |Logical Machine (LM) ID that caused the shutdown of     |
+|                     |this LM                                                 |
+|                     |Bit[23] Valid err ID:                                   |
+|                     |Set to 1 if the error ID field is valid.                |
+|                     |Set to 0 if the error ID field is not valid.            |
+|                     |Bits[22:8] Error ID (agent ID of the system).           |
+|                     |Bits[7:0] Reason:                                       |
+|                     |See the SRESR register description in the System        |
+|                     |Reset Controller (SRC) section in SoC reference manual. |
+|                     |One reason maps to BIT(reason) in SRESR.                |
++---------------------+--------------------------------------------------------+
+|uint32 extinfo[3]    |Array of extended info words (e.g. fault PC)            |
++---------------------+--------------------------------------------------------+
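+
+A consumer of this command should validate each field before trusting it. A
+sketch of decoding bootflags with the kernel bitfield helpers; the mask names
+are illustrative, not part of the protocol::
+
+    #include <linux/bitfield.h>
+    #include <linux/bits.h>
+
+    #define LMM_REASON_VALID     BIT(31)         /* whole word valid */
+    #define LMM_REASON_ORIG_VLD  BIT(28)         /* origin field valid */
+    #define LMM_REASON_ORIGIN    GENMASK(27, 24) /* LM that caused it */
+    #define LMM_REASON_ERR_VLD   BIT(23)         /* error ID field valid */
+    #define LMM_REASON_ERR_ID    GENMASK(22, 8)  /* agent ID of the system */
+    #define LMM_REASON_REASON    GENMASK(7, 0)   /* BIT(reason) in SRESR */
+
+    if (bootflags & LMM_REASON_VALID) {
+        if (bootflags & LMM_REASON_ORIG_VLD)
+            pr_info("booted by LM %lu\n",
+                    FIELD_GET(LMM_REASON_ORIGIN, bootflags));
+        pr_info("reason: bit %lu in SRESR\n",
+                FIELD_GET(LMM_REASON_REASON, bootflags));
+    }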
+
+LMM_POWER_ON
+~~~~~~~~~~~~
+
+message_id: 0xB
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status      |SUCCESS: if the LM successfully powers on.                 |
+|                  |NOT_FOUND: if lmid is not a valid logical machine.         |
+|                  |INVALID_PARAMETERS: if lmid is the caller's own LM.        |
+|                  |DENIED: if the agent does not have permission to manage the|
+|                  |LM specified by lmid.                                      |
++------------------+-----------------------------------------------------------+
+
+LMM_RESET_VECTOR_SET
+~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0xC
+protocol_id: 0x80
+This command is mandatory.
+
++-----------------------+------------------------------------------------------+
+|Parameters |
++-----------------------+------------------------------------------------------+
+|Name |Description |
++-----------------------+------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++-----------------------+------------------------------------------------------+
+|uint32 cpuid |ID of the CPU inside the LM |
++-----------------------+------------------------------------------------------+
+|uint32 flags |Reset vector flags |
+| |Bits[31:0] Reserved, must be zero. |
++-----------------------+------------------------------------------------------+
+|uint32 resetVectorLow  |Lower 32 bits of the reset vector                     |
++-----------------------+------------------------------------------------------+
+|uint32 resetVectorHigh |Upper 32 bits of the reset vector                     |
++-----------------------+------------------------------------------------------+
+|Return values |
++-----------------------+------------------------------------------------------+
+|Name |Description |
++-----------------------+------------------------------------------------------+
+|int32 status           |SUCCESS: if the reset vector is set successfully.     |
+|                       |NOT_FOUND: if lmid is not a valid logical machine, or |
+|                       |cpuId is not valid.                                   |
+|                       |INVALID_PARAMETERS: if the reset vector is invalid.   |
+| |DENIED: if the agent does not have permission to set |
+| |the reset vector for the CPU in the LM. |
++-----------------------+------------------------------------------------------+
+
+NEGOTIATE_PROTOCOL_VERSION
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x10
+protocol_id: 0x80
+This command is mandatory.
+
++--------------------+---------------------------------------------------------+
+|Parameters |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|uint32 version |The negotiated protocol version the agent intends to use |
++--------------------+---------------------------------------------------------+
+|Return values |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|int32 status |SUCCESS: if the negotiated protocol version is supported |
+| |by the platform. All commands, responses, and |
+| |notifications post successful return of this command must|
+| |comply with the negotiated version. |
+| |NOT_SUPPORTED: if the protocol version is not supported. |
++--------------------+---------------------------------------------------------+
+
+Notifications
+_____________
+
+LMM_EVENT
+~~~~~~~~~
+
+message_id: 0x0
+protocol_id: 0x80
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |Identifier for the LM that caused the transition. |
++------------------+-----------------------------------------------------------+
+|uint32 eventlm |Identifier of the LM this event refers to. |
++------------------+-----------------------------------------------------------+
+|uint32 flags |LM events: |
+|                  |Bits[31:4] Reserved, must be zero.                         |
+| |Bit[3] Wake (resume) event: |
+| |1 LM has awakened. |
+| |0 not a wake event. |
+| |Bit[2] Suspend (sleep) event: |
+| |1 LM has suspended. |
+| |0 not a suspend event. |
+| |Bit[1] Shutdown (off) event: |
+| |1 LM has shutdown. |
+| |0 not a shutdown event. |
+| |Bit[0] Boot (on) event: |
+| |1 LM has booted. |
+| |0 not a boot event. |
++------------------+-----------------------------------------------------------+
+
SCMI_BBM: System Control and Management BBM Vendor Protocol
==============================================================
@@ -436,6 +948,322 @@ protocol_id: 0x81
| |0 no button change detected. |
+------------------+-----------------------------------------------------------+
+System Control and Management CPU Vendor Protocol
+=================================================
+
+This protocol allows an agent to start or stop a CPU. It is used to manage
+auxiliary CPUs in a target LM (e.g. additional cores in an AP cluster or
+Cortex-M cores).
+Note:
+ - For cores in the AP cluster, PSCI should be used and the PSCI firmware
+   will use the CPU protocol to handle them. For cores in a non-AP cluster,
+   an operating system (e.g. Linux) can use the CPU protocol to control
+   Cortex-M7 cores.
+ - CPU indicates the core and its auxiliary peripherals (e.g. TCM) inside
+   the i.MX SoC.
+
+There are cases where giving an agent full control of a CPU via the CPU
+protocol is not desired. The LMM protocol is restricted to just boot,
+shutdown, etc., so an agent may boot another logical machine without being
+able to directly alter the state of its CPUs. This is also the reason there
+is an LMM power-on command even though that could have been done through
+the power protocol.
+
+The CPU protocol provides commands to:
+
+- Describe the protocol version.
+- Discover implementation attributes.
+- Discover the CPUs defined in the system.
+- Start a CPU.
+- Stop a CPU.
+- Set the boot and resume addresses for a CPU.
+- Set the sleep mode of a CPU.
+- Configure wake-up sources for a CPU.
+- Configure power domain reactions (LPM mode and retention mask) for a CPU.
+
+The CPU IDs can be found in the CPU section of the SoC DEVICE: SM Device
+Interface. They can also be found in the SoC RM; see the CPU Mode Control
+(CMC) list in the General Power Controller (GPC) section.
+
+CPU settings are not aggregated and setting their state is normally exclusive
+to one client.
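+
+As a usage sketch, the in-kernel wrappers added for this protocol (see
+drivers/firmware/imx/sm-cpu.c in this series) can boot an auxiliary core by
+programming its boot reset vector and then starting it; the CPU ID and load
+address below are hypothetical::
+
+    #include <linux/firmware/imx/sm.h>
+
+    #define M7_CPUID  1  /* hypothetical CPU ID, see the SoC RM */
+
+    /* Program the boot (not resume) reset vector, then release the core. */
+    ret = scmi_imx_cpu_reset_vector_set(M7_CPUID, 0x80000000, false,
+                                        true, false);
+    if (!ret)
+        ret = scmi_imx_cpu_start(M7_CPUID, true);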
+
+Commands:
+_________
+
+PROTOCOL_VERSION
+~~~~~~~~~~~~~~~~
+
+message_id: 0x0
+protocol_id: 0x82
+This command is mandatory.
+
++---------------+--------------------------------------------------------------+
+|Return values |
++---------------+--------------------------------------------------------------+
+|Name |Description |
++---------------+--------------------------------------------------------------+
+|int32 status | See ARM SCMI Specification for status code definitions. |
++---------------+--------------------------------------------------------------+
+|uint32 version | For this revision of the specification, this value must be |
+| | 0x10000. |
++---------------+--------------------------------------------------------------+
+
+PROTOCOL_ATTRIBUTES
+~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x1
+protocol_id: 0x82
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status | See ARM SCMI Specification for status code definitions. |
++------------------+-----------------------------------------------------------+
+|uint32 attributes |Protocol attributes: |
+| |Bits[31:16] Reserved, must be zero. |
+| |Bits[15:0] Number of CPUs |
++------------------+-----------------------------------------------------------+
+
+PROTOCOL_MESSAGE_ATTRIBUTES
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x2
+protocol_id: 0x82
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: in case the message is implemented and available |
+| |to use. |
+| |NOT_FOUND: if the message identified by message_id is |
+| |invalid or not implemented |
++------------------+-----------------------------------------------------------+
+|uint32 attributes |Flags that are associated with a specific command in the |
+| |protocol. For all commands in this protocol, this |
+| |parameter has a value of 0 |
++------------------+-----------------------------------------------------------+
+
+CPU_ATTRIBUTES
+~~~~~~~~~~~~~~
+
+message_id: 0x3
+protocol_id: 0x82
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 cpuid |Identifier for the CPU |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if valid attributes are returned successfully. |
+| |NOT_FOUND: if the cpuid is not valid. |
++------------------+-----------------------------------------------------------+
+|uint32 attributes |Bits[31:0] Reserved, must be zero |
++------------------+-----------------------------------------------------------+
+|char name[16] |NULL terminated ASCII string with CPU name up to 16 bytes |
++------------------+-----------------------------------------------------------+
+
+CPU_START
+~~~~~~~~~
+
+message_id: 0x4
+protocol_id: 0x82
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 cpuid |Identifier for the CPU |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status      |SUCCESS: if the CPU is started successfully.               |
+| |NOT_FOUND: if cpuid is not valid. |
+| |DENIED: the calling agent is not allowed to start this CPU.|
++------------------+-----------------------------------------------------------+
+
+CPU_STOP
+~~~~~~~~
+
+message_id: 0x5
+protocol_id: 0x82
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 cpuid |Identifier for the CPU |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status      |SUCCESS: if the CPU is stopped successfully.               |
+| |NOT_FOUND: if cpuid is not valid. |
+| |DENIED: the calling agent is not allowed to stop this CPU. |
++------------------+-----------------------------------------------------------+
+
+CPU_RESET_VECTOR_SET
+~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x6
+protocol_id: 0x82
+This command is mandatory.
+
++----------------------+-------------------------------------------------------+
+|Parameters |
++----------------------+-------------------------------------------------------+
+|Name |Description |
++----------------------+-------------------------------------------------------+
+|uint32 cpuid |Identifier for the CPU |
++----------------------+-------------------------------------------------------+
+|uint32 flags |Reset vector flags: |
+| |Bit[31] Resume flag. |
+| |Set to 1 to update the reset vector used on resume. |
+| |Bit[30] Boot flag. |
+| |Set to 1 to update the reset vector used for boot. |
+| |Bits[29:1] Reserved, must be zero. |
+| |Bit[0] Table flag. |
+| |Set to 1 if vector is the vector table base address. |
++----------------------+-------------------------------------------------------+
+|uint32 resetVectorLow |Lower vector: |
+| |If bit[0] of flags is 0, the lower 32 bits of the |
+| |physical address where the CPU should execute from on |
+| |reset. If bit[0] of flags is 1, the lower 32 bits of |
+| |the vector table base address |
++----------------------+-------------------------------------------------------+
+|uint32 resetVectorHigh|Upper vector:                                          |
+| |If bit[0] of flags is 0, the upper 32 bits of the |
+| |physical address where the CPU should execute from on |
+| |reset. If bit[0] of flags is 1, the upper 32 bits of |
+| |the vector table base address |
++----------------------+-------------------------------------------------------+
+|Return values |
++----------------------+-------------------------------------------------------+
+|Name |Description |
++----------------------+-------------------------------------------------------+
+|int32 status |SUCCESS: if the CPU reset vector is set successfully. |
+| |NOT_FOUND: if cpuId does not point to a valid CPU. |
+| |INVALID_PARAMETERS: the requested vector type is not |
+| |supported by this CPU. |
+| |DENIED: the calling agent is not allowed to set the |
+| |reset vector of this CPU |
++----------------------+-------------------------------------------------------+
+
+CPU_SLEEP_MODE_SET
+~~~~~~~~~~~~~~~~~~
+
+message_id: 0x7
+protocol_id: 0x82
+This command is mandatory.
+
++----------------------+-------------------------------------------------------+
+|Parameters |
++----------------------+-------------------------------------------------------+
+|Name |Description |
++----------------------+-------------------------------------------------------+
+|uint32 cpuid |Identifier for the CPU |
++----------------------+-------------------------------------------------------+
+|uint32 flags |Sleep mode flags: |
+| |Bits[31:1] Reserved, must be zero. |
+| |Bit[0] IRQ mux: |
+| |If set to 1 the wakeup mux source is the GIC, else if 0|
+| |then the GPC |
++----------------------+-------------------------------------------------------+
+|uint32 sleepmode      |Target sleep mode. When the CPU executes WFI, the GPC  |
+|                      |will enter one of the following modes:                 |
+| |RUN: (0) |
+| |WAIT: (1) |
+| |STOP: (2) |
+| |SUSPEND: (3) |
++----------------------+-------------------------------------------------------+
+|Return values |
++----------------------+-------------------------------------------------------+
+|Name |Description |
++----------------------+-------------------------------------------------------+
+|int32 status |SUCCESS: if the CPU sleep mode is set successfully. |
+| |NOT_FOUND: if cpuId does not point to a valid CPU. |
+|                      |INVALID_PARAMETERS: the sleepmode or flags are invalid.|
+| |DENIED: the calling agent is not allowed to configure |
+| |the CPU |
++----------------------+-------------------------------------------------------+
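+
+An illustrative encoding of the parameters (the enum values mirror the table
+above; the names and helper are not part of the protocol)::
+
+    enum cpu_sleep_mode { CPU_RUN, CPU_WAIT, CPU_STOP, CPU_SUSPEND };
+
+    #define CPU_SLEEP_FLAG_IRQ_MUX_GIC  BIT(0)  /* wake mux: GIC, else GPC */
+
+    /* Enter SUSPEND when the CPU executes WFI, waking via the GIC. */
+    cpu_sleep_mode_set(cpuid, CPU_SLEEP_FLAG_IRQ_MUX_GIC, CPU_SUSPEND);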
+
+CPU_INFO_GET
+~~~~~~~~~~~~
+
+message_id: 0xC
+protocol_id: 0x82
+This command is mandatory.
+
++----------------------+-------------------------------------------------------+
+|Parameters |
++----------------------+-------------------------------------------------------+
+|Name |Description |
++----------------------+-------------------------------------------------------+
+|uint32 cpuid |Identifier for the CPU |
++----------------------+-------------------------------------------------------+
+|Return values |
++----------------------+-------------------------------------------------------+
+|Name |Description |
++----------------------+-------------------------------------------------------+
+|int32 status |SUCCESS: if valid attributes are returned successfully.|
+| |NOT_FOUND: if the cpuid is not valid. |
++----------------------+-------------------------------------------------------+
+|uint32 runmode        |Run mode for the CPU:                                  |
+|                      |RUN(0): CPU started.                                   |
+|                      |HOLD(1): CPU powered up with reset asserted.           |
+|                      |STOP(2): CPU held in reset.                            |
+|                      |SUSPEND(3): CPU in cpuidle state.                      |
++----------------------+-------------------------------------------------------+
+|uint32 sleepmode |Sleep mode for the CPU, see CPU_SLEEP_MODE_SET |
++----------------------+-------------------------------------------------------+
+|uint32 resetvectorlow |Reset vector low 32 bits for the CPU |
++----------------------+-------------------------------------------------------+
+|uint32 resetvectorhigh|Reset vector high 32 bits for the CPU                  |
++----------------------+-------------------------------------------------------+
+
+NEGOTIATE_PROTOCOL_VERSION
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x10
+protocol_id: 0x82
+This command is mandatory.
+
++--------------------+---------------------------------------------------------+
+|Parameters |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|uint32 version |The negotiated protocol version the agent intends to use |
++--------------------+---------------------------------------------------------+
+|Return values |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|int32 status |SUCCESS: if the negotiated protocol version is supported |
+| |by the platform. All commands, responses, and |
+| |notifications post successful return of this command must|
+| |comply with the negotiated version. |
+| |NOT_SUPPORTED: if the protocol version is not supported. |
++--------------------+---------------------------------------------------------+
+
SCMI_MISC: System Control and Management MISC Vendor Protocol
================================================================
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 5fe61b9ab5f9..db8c5c03d3a2 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -281,6 +281,30 @@ config EFI_EMBEDDED_FIRMWARE
bool
select CRYPTO_LIB_SHA256
+config EFI_SBAT
+ def_bool y if EFI_SBAT_FILE!=""
+
+config EFI_SBAT_FILE
+ string "Embedded SBAT section file path"
+ depends on EFI_ZBOOT
+ help
+ SBAT section provides a way to improve SecureBoot revocations of UEFI
+ binaries by introducing a generation-based mechanism. With SBAT, older
+ UEFI binaries can be prevented from booting by bumping the minimal
+ required generation for the specific component in the bootloader.
+
+ Note: SBAT information is distribution specific, i.e. the owner of the
+ signing SecureBoot certificate must define the SBAT policy. Linux
+ kernel upstream does not define SBAT components and their generations.
+
+	  See https://github.com/rhboot/shim/blob/main/SBAT.md for additional
+	  details.
+
+ Specify a file with SBAT data which is going to be embedded as '.sbat'
+ section into the kernel.
+
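+	  A minimal SBAT file is CSV data; a hypothetical example (the first
+	  line is the format header from SBAT.md):
+
+	    sbat,1,SBAT Version,sbat,1,https://github.com/rhboot/shim/blob/main/SBAT.md
+	    linux,1,Example Distro,linux,6.16,https://example.com/linux
+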
+ If unsure, leave blank.
+
endmenu
config UEFI_CPER
diff --git a/drivers/firmware/efi/libstub/Makefile.zboot b/drivers/firmware/efi/libstub/Makefile.zboot
index 48842b5c106b..92e3c73502ba 100644
--- a/drivers/firmware/efi/libstub/Makefile.zboot
+++ b/drivers/firmware/efi/libstub/Makefile.zboot
@@ -44,6 +44,10 @@ AFLAGS_zboot-header.o += -DMACHINE_TYPE=IMAGE_FILE_MACHINE_$(EFI_ZBOOT_MACH_TYPE
$(obj)/zboot-header.o: $(srctree)/drivers/firmware/efi/libstub/zboot-header.S FORCE
$(call if_changed_rule,as_o_S)
+ifneq ($(CONFIG_EFI_SBAT_FILE),)
+$(obj)/zboot-header.o: $(CONFIG_EFI_SBAT_FILE)
+endif
+
ZBOOT_DEPS := $(obj)/zboot-header.o $(objtree)/drivers/firmware/efi/libstub/lib.a
LDFLAGS_vmlinuz.efi.elf := -T $(srctree)/drivers/firmware/efi/libstub/zboot.lds
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index fd6dc790c5a8..7aa2f9ad2935 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -601,6 +601,7 @@ efi_status_t efi_load_initrd_cmdline(efi_loaded_image_t *image,
* @image: EFI loaded image protocol
* @soft_limit: preferred address for loading the initrd
* @hard_limit: upper limit address for loading the initrd
+ * @out: pointer to store the address of the initrd table
*
* Return: status code
*/
diff --git a/drivers/firmware/efi/libstub/zboot-header.S b/drivers/firmware/efi/libstub/zboot-header.S
index fb676ded47fa..b6431edd0fc9 100644
--- a/drivers/firmware/efi/libstub/zboot-header.S
+++ b/drivers/firmware/efi/libstub/zboot-header.S
@@ -4,17 +4,17 @@
#ifdef CONFIG_64BIT
.set .Lextra_characteristics, 0x0
- .set .Lpe_opt_magic, PE_OPT_MAGIC_PE32PLUS
+ .set .Lpe_opt_magic, IMAGE_NT_OPTIONAL_HDR64_MAGIC
#else
.set .Lextra_characteristics, IMAGE_FILE_32BIT_MACHINE
- .set .Lpe_opt_magic, PE_OPT_MAGIC_PE32
+ .set .Lpe_opt_magic, IMAGE_NT_OPTIONAL_HDR32_MAGIC
#endif
.section ".head", "a"
.globl __efistub_efi_zboot_header
__efistub_efi_zboot_header:
.Ldoshdr:
- .long MZ_MAGIC
+ .long IMAGE_DOS_SIGNATURE
.ascii "zimg" // image type
.long __efistub__gzdata_start - .Ldoshdr // payload offset
.long __efistub__gzdata_size - ZBOOT_SIZE_LEN // payload size
@@ -25,7 +25,7 @@ __efistub_efi_zboot_header:
.long .Lpehdr - .Ldoshdr // PE header offset
.Lpehdr:
- .long PE_MAGIC
+ .long IMAGE_NT_SIGNATURE
.short MACHINE_TYPE
.short .Lsection_count
.long 0
@@ -63,7 +63,7 @@ __efistub_efi_zboot_header:
.long .Lefi_header_end - .Ldoshdr
.long 0
.short IMAGE_SUBSYSTEM_EFI_APPLICATION
- .short IMAGE_DLL_CHARACTERISTICS_NX_COMPAT
+ .short IMAGE_DLLCHARACTERISTICS_NX_COMPAT
#ifdef CONFIG_64BIT
.quad 0, 0, 0, 0
#else
@@ -123,11 +123,29 @@ __efistub_efi_zboot_header:
IMAGE_SCN_MEM_READ | \
IMAGE_SCN_MEM_EXECUTE
+#ifdef CONFIG_EFI_SBAT
+ .ascii ".sbat\0\0\0"
+ .long __sbat_size
+ .long _sbat - .Ldoshdr
+ .long __sbat_size
+ .long _sbat - .Ldoshdr
+
+ .long 0, 0
+ .short 0, 0
+ .long IMAGE_SCN_CNT_INITIALIZED_DATA | \
+ IMAGE_SCN_MEM_READ | \
+ IMAGE_SCN_MEM_DISCARDABLE
+
+ .pushsection ".sbat", "a", @progbits
+ .incbin CONFIG_EFI_SBAT_FILE
+ .popsection
+#endif
+
.ascii ".data\0\0\0"
.long __data_size
- .long _etext - .Ldoshdr
+ .long _data - .Ldoshdr
.long __data_rawsize
- .long _etext - .Ldoshdr
+ .long _data - .Ldoshdr
.long 0, 0
.short 0, 0
diff --git a/drivers/firmware/efi/libstub/zboot.lds b/drivers/firmware/efi/libstub/zboot.lds
index 9ecc57ff5b45..c3a166675450 100644
--- a/drivers/firmware/efi/libstub/zboot.lds
+++ b/drivers/firmware/efi/libstub/zboot.lds
@@ -29,7 +29,17 @@ SECTIONS
. = _etext;
}
+#ifdef CONFIG_EFI_SBAT
+ .sbat : ALIGN(4096) {
+ _sbat = .;
+ *(.sbat)
+ _esbat = ALIGN(4096);
+ . = _esbat;
+ }
+#endif
+
.data : ALIGN(4096) {
+ _data = .;
*(.data* .init.data*)
_edata = ALIGN(512);
. = _edata;
@@ -52,3 +62,4 @@ PROVIDE(__efistub__gzdata_size =
PROVIDE(__data_rawsize = ABSOLUTE(_edata - _etext));
PROVIDE(__data_size = ABSOLUTE(_end - _etext));
+PROVIDE(__sbat_size = ABSOLUTE(_esbat - _sbat));
diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
index 34109fd86c55..f1c04d7cfd71 100644
--- a/drivers/firmware/efi/memmap.c
+++ b/drivers/firmware/efi/memmap.c
@@ -43,7 +43,8 @@ int __init __efi_memmap_init(struct efi_memory_map_data *data)
map.map = early_memremap(phys_map, data->size);
if (!map.map) {
- pr_err("Could not map the memory map!\n");
+ pr_err("Could not map the memory map! phys_map=%pa, size=0x%lx\n",
+ &phys_map, data->size);
return -ENOMEM;
}
diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c
index 9e2628728aad..77b5f7ac3e20 100644
--- a/drivers/firmware/efi/test/efi_test.c
+++ b/drivers/firmware/efi/test/efi_test.c
@@ -361,6 +361,10 @@ static long efi_runtime_get_waketime(unsigned long arg)
getwakeuptime.enabled))
return -EFAULT;
+ if (getwakeuptime.pending && put_user(pending,
+ getwakeuptime.pending))
+ return -EFAULT;
+
if (getwakeuptime.time) {
if (copy_to_user(getwakeuptime.time, &efi_time,
sizeof(efi_time_t)))
diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig
index c964f4924359..127ad752acf8 100644
--- a/drivers/firmware/imx/Kconfig
+++ b/drivers/firmware/imx/Kconfig
@@ -23,6 +23,28 @@ config IMX_SCU
This driver manages the IPC interface between host CPU and the
SCU firmware running on M4.
+config IMX_SCMI_CPU_DRV
+ tristate "IMX SCMI CPU Protocol driver"
+ depends on ARCH_MXC || COMPILE_TEST
+ default y if ARCH_MXC
+ help
+ The System Controller Management Interface firmware (SCMI FW) is
+ a low-level system function which runs on a dedicated Cortex-M
+ core that could provide cpu management features.
+
+ This driver can also be built as a module.
+
+config IMX_SCMI_LMM_DRV
+ tristate "IMX SCMI LMM Protocol driver"
+ depends on ARCH_MXC || COMPILE_TEST
+ default y if ARCH_MXC
+ help
+ The System Controller Management Interface firmware (SCMI FW) is
+ a low-level system function which runs on a dedicated Cortex-M
+ core that could provide Logical Machine management features.
+
+ This driver can also be built as a module.
+
config IMX_SCMI_MISC_DRV
tristate "IMX SCMI MISC Protocol driver"
depends on ARCH_MXC || COMPILE_TEST
diff --git a/drivers/firmware/imx/Makefile b/drivers/firmware/imx/Makefile
index 8d046c341be8..3bbaffa6e347 100644
--- a/drivers/firmware/imx/Makefile
+++ b/drivers/firmware/imx/Makefile
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_IMX_DSP) += imx-dsp.o
obj-$(CONFIG_IMX_SCU) += imx-scu.o misc.o imx-scu-irq.o rm.o imx-scu-soc.o
+obj-${CONFIG_IMX_SCMI_CPU_DRV} += sm-cpu.o
obj-${CONFIG_IMX_SCMI_MISC_DRV} += sm-misc.o
+obj-${CONFIG_IMX_SCMI_LMM_DRV} += sm-lmm.o
diff --git a/drivers/firmware/imx/sm-cpu.c b/drivers/firmware/imx/sm-cpu.c
new file mode 100644
index 000000000000..091b014f739f
--- /dev/null
+++ b/drivers/firmware/imx/sm-cpu.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025 NXP
+ */
+
+#include <linux/firmware/imx/sm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/scmi_protocol.h>
+#include <linux/scmi_imx_protocol.h>
+
+static const struct scmi_imx_cpu_proto_ops *imx_cpu_ops;
+static struct scmi_protocol_handle *ph;
+
+int scmi_imx_cpu_reset_vector_set(u32 cpuid, u64 vector, bool start, bool boot,
+ bool resume)
+{
+ if (!ph)
+ return -EPROBE_DEFER;
+
+ return imx_cpu_ops->cpu_reset_vector_set(ph, cpuid, vector, start,
+ boot, resume);
+}
+EXPORT_SYMBOL(scmi_imx_cpu_reset_vector_set);
+
+int scmi_imx_cpu_start(u32 cpuid, bool start)
+{
+ if (!ph)
+ return -EPROBE_DEFER;
+
+ if (start)
+ return imx_cpu_ops->cpu_start(ph, cpuid, true);
+
+ return imx_cpu_ops->cpu_start(ph, cpuid, false);
+};
+EXPORT_SYMBOL(scmi_imx_cpu_start);
+
+int scmi_imx_cpu_started(u32 cpuid, bool *started)
+{
+ if (!ph)
+ return -EPROBE_DEFER;
+
+ if (!started)
+ return -EINVAL;
+
+ return imx_cpu_ops->cpu_started(ph, cpuid, started);
+};
+EXPORT_SYMBOL(scmi_imx_cpu_started);
+
+static int scmi_imx_cpu_probe(struct scmi_device *sdev)
+{
+ const struct scmi_handle *handle = sdev->handle;
+
+ if (!handle)
+ return -ENODEV;
+
+ if (imx_cpu_ops) {
+ dev_err(&sdev->dev, "sm cpu already initialized\n");
+ return -EEXIST;
+ }
+
+ imx_cpu_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_IMX_CPU, &ph);
+ if (IS_ERR(imx_cpu_ops))
+ return PTR_ERR(imx_cpu_ops);
+
+ return 0;
+}
+
+static const struct scmi_device_id scmi_id_table[] = {
+ { SCMI_PROTOCOL_IMX_CPU, "imx-cpu" },
+ { },
+};
+MODULE_DEVICE_TABLE(scmi, scmi_id_table);
+
+static struct scmi_driver scmi_imx_cpu_driver = {
+ .name = "scmi-imx-cpu",
+ .probe = scmi_imx_cpu_probe,
+ .id_table = scmi_id_table,
+};
+module_scmi_driver(scmi_imx_cpu_driver);
+
+MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>");
+MODULE_DESCRIPTION("IMX SM CPU driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/imx/sm-lmm.c b/drivers/firmware/imx/sm-lmm.c
new file mode 100644
index 000000000000..6807bf563c03
--- /dev/null
+++ b/drivers/firmware/imx/sm-lmm.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025 NXP
+ */
+
+#include <linux/firmware/imx/sm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/scmi_protocol.h>
+#include <linux/scmi_imx_protocol.h>
+
+static const struct scmi_imx_lmm_proto_ops *imx_lmm_ops;
+static struct scmi_protocol_handle *ph;
+
+int scmi_imx_lmm_info(u32 lmid, struct scmi_imx_lmm_info *info)
+{
+ if (!ph)
+ return -EPROBE_DEFER;
+
+ if (!info)
+ return -EINVAL;
+
+ return imx_lmm_ops->lmm_info(ph, lmid, info);
+};
+EXPORT_SYMBOL(scmi_imx_lmm_info);
+
+int scmi_imx_lmm_reset_vector_set(u32 lmid, u32 cpuid, u32 flags, u64 vector)
+{
+ if (!ph)
+ return -EPROBE_DEFER;
+
+ return imx_lmm_ops->lmm_reset_vector_set(ph, lmid, cpuid, flags, vector);
+}
+EXPORT_SYMBOL(scmi_imx_lmm_reset_vector_set);
+
+int scmi_imx_lmm_operation(u32 lmid, enum scmi_imx_lmm_op op, u32 flags)
+{
+ if (!ph)
+ return -EPROBE_DEFER;
+
+ switch (op) {
+ case SCMI_IMX_LMM_BOOT:
+ return imx_lmm_ops->lmm_power_boot(ph, lmid, true);
+ case SCMI_IMX_LMM_POWER_ON:
+ return imx_lmm_ops->lmm_power_boot(ph, lmid, false);
+ case SCMI_IMX_LMM_SHUTDOWN:
+ return imx_lmm_ops->lmm_shutdown(ph, lmid, flags);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(scmi_imx_lmm_operation);
+
+static int scmi_imx_lmm_probe(struct scmi_device *sdev)
+{
+ const struct scmi_handle *handle = sdev->handle;
+
+ if (!handle)
+ return -ENODEV;
+
+ if (imx_lmm_ops) {
+ dev_err(&sdev->dev, "lmm already initialized\n");
+ return -EEXIST;
+ }
+
+ imx_lmm_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_IMX_LMM, &ph);
+ if (IS_ERR(imx_lmm_ops))
+ return PTR_ERR(imx_lmm_ops);
+
+ return 0;
+}
+
+static const struct scmi_device_id scmi_id_table[] = {
+ { SCMI_PROTOCOL_IMX_LMM, "imx-lmm" },
+ { },
+};
+MODULE_DEVICE_TABLE(scmi, scmi_id_table);
+
+static struct scmi_driver scmi_imx_lmm_driver = {
+ .name = "scmi-imx-lmm",
+ .probe = scmi_imx_lmm_probe,
+ .id_table = scmi_id_table,
+};
+module_scmi_driver(scmi_imx_lmm_driver);
+
+MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>");
+MODULE_DESCRIPTION("IMX SM LMM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
index fc4d67e4c4a6..f63b716be5b0 100644
--- a/drivers/firmware/qcom/qcom_scm.c
+++ b/drivers/firmware/qcom/qcom_scm.c
@@ -1986,7 +1986,10 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);
*/
static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
{ .compatible = "asus,vivobook-s15" },
+ { .compatible = "asus,zenbook-a14-ux3407qa" },
+ { .compatible = "asus,zenbook-a14-ux3407ra" },
{ .compatible = "dell,xps13-9345" },
+ { .compatible = "hp,elitebook-ultra-g1q" },
{ .compatible = "hp,omnibook-x14" },
{ .compatible = "huawei,gaokun3" },
{ .compatible = "lenovo,flex-5g" },
diff --git a/drivers/firmware/qcom/qcom_scm.h b/drivers/firmware/qcom/qcom_scm.h
index 097369d38b84..3133d826f5fa 100644
--- a/drivers/firmware/qcom/qcom_scm.h
+++ b/drivers/firmware/qcom/qcom_scm.h
@@ -44,8 +44,11 @@ enum qcom_scm_arg_types {
/**
* struct qcom_scm_desc
+ * @svc: Service identifier
+ * @cmd: Command identifier
* @arginfo: Metadata describing the arguments in args[]
* @args: The array of arguments for the secure syscall
+ * @owner: Owner identifier
*/
struct qcom_scm_desc {
u32 svc;
diff --git a/drivers/firmware/qcom/qcom_tzmem.c b/drivers/firmware/qcom/qcom_tzmem.c
index 92b365178235..94196ad87105 100644
--- a/drivers/firmware/qcom/qcom_tzmem.c
+++ b/drivers/firmware/qcom/qcom_tzmem.c
@@ -79,6 +79,7 @@ static const char *const qcom_tzmem_blacklist[] = {
"qcom,sc8180x",
"qcom,sdm670", /* failure in GPU firmware loading */
"qcom,sdm845", /* reset in rmtfs memory assignment */
+ "qcom,sm7150", /* reset in rmtfs memory assignment */
"qcom,sm8150", /* reset in rmtfs memory assignment */
NULL
};
diff --git a/drivers/firmware/samsung/exynos-acpm-pmic.c b/drivers/firmware/samsung/exynos-acpm-pmic.c
index 85e90d236da2..39b33a356ebd 100644
--- a/drivers/firmware/samsung/exynos-acpm-pmic.c
+++ b/drivers/firmware/samsung/exynos-acpm-pmic.c
@@ -43,13 +43,13 @@ static inline u32 acpm_pmic_get_bulk(u32 data, unsigned int i)
return (data >> (ACPM_PMIC_BULK_SHIFT * i)) & ACPM_PMIC_BULK_MASK;
}
-static void acpm_pmic_set_xfer(struct acpm_xfer *xfer, u32 *cmd,
+static void acpm_pmic_set_xfer(struct acpm_xfer *xfer, u32 *cmd, size_t cmdlen,
unsigned int acpm_chan_id)
{
xfer->txd = cmd;
xfer->rxd = cmd;
- xfer->txlen = sizeof(cmd);
- xfer->rxlen = sizeof(cmd);
+ xfer->txlen = cmdlen;
+ xfer->rxlen = cmdlen;
xfer->acpm_chan_id = acpm_chan_id;
}
@@ -71,7 +71,7 @@ int acpm_pmic_read_reg(const struct acpm_handle *handle,
int ret;
acpm_pmic_init_read_cmd(cmd, type, reg, chan);
- acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+ acpm_pmic_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id);
ret = acpm_do_xfer(handle, &xfer);
if (ret)
@@ -104,7 +104,7 @@ int acpm_pmic_bulk_read(const struct acpm_handle *handle,
return -EINVAL;
acpm_pmic_init_bulk_read_cmd(cmd, type, reg, chan, count);
- acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+ acpm_pmic_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id);
ret = acpm_do_xfer(handle, &xfer);
if (ret)
@@ -144,7 +144,7 @@ int acpm_pmic_write_reg(const struct acpm_handle *handle,
int ret;
acpm_pmic_init_write_cmd(cmd, type, reg, chan, value);
- acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+ acpm_pmic_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id);
ret = acpm_do_xfer(handle, &xfer);
if (ret)
@@ -184,7 +184,7 @@ int acpm_pmic_bulk_write(const struct acpm_handle *handle,
return -EINVAL;
acpm_pmic_init_bulk_write_cmd(cmd, type, reg, chan, count, buf);
- acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+ acpm_pmic_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id);
ret = acpm_do_xfer(handle, &xfer);
if (ret)
@@ -214,7 +214,7 @@ int acpm_pmic_update_reg(const struct acpm_handle *handle,
int ret;
acpm_pmic_init_update_cmd(cmd, type, reg, chan, value, mask);
- acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+ acpm_pmic_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id);
ret = acpm_do_xfer(handle, &xfer);
if (ret)
diff --git a/drivers/firmware/samsung/exynos-acpm.c b/drivers/firmware/samsung/exynos-acpm.c
index 15e991b99f5a..e02f14f4bd7c 100644
--- a/drivers/firmware/samsung/exynos-acpm.c
+++ b/drivers/firmware/samsung/exynos-acpm.c
@@ -15,6 +15,7 @@
#include <linux/firmware/samsung/exynos-acpm-protocol.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/ktime.h>
#include <linux/mailbox/exynos-message.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
@@ -32,8 +33,7 @@
#define ACPM_PROTOCOL_SEQNUM GENMASK(21, 16)
-/* The unit of counter is 20 us. 5000 * 20 = 100 ms */
-#define ACPM_POLL_TIMEOUT 5000
+#define ACPM_POLL_TIMEOUT_US (100 * USEC_PER_MSEC)
#define ACPM_TX_TIMEOUT_US 500000
#define ACPM_GS101_INITDATA_BASE 0xa000
@@ -300,12 +300,13 @@ static int acpm_dequeue_by_polling(struct acpm_chan *achan,
const struct acpm_xfer *xfer)
{
struct device *dev = achan->acpm->dev;
- unsigned int cnt_20us = 0;
+ ktime_t timeout;
u32 seqnum;
int ret;
seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]);
+ timeout = ktime_add_us(ktime_get(), ACPM_POLL_TIMEOUT_US);
do {
ret = acpm_get_rx(achan, xfer);
if (ret)
@@ -315,12 +316,11 @@ static int acpm_dequeue_by_polling(struct acpm_chan *achan,
return 0;
/* Determined experimentally. */
- usleep_range(20, 30);
- cnt_20us++;
- } while (cnt_20us < ACPM_POLL_TIMEOUT);
+ udelay(20);
+ } while (ktime_before(ktime_get(), timeout));
- dev_err(dev, "Timeout! ch:%u s:%u bitmap:%lx, cnt_20us = %d.\n",
- achan->id, seqnum, achan->bitmap_seqnum[0], cnt_20us);
+ dev_err(dev, "Timeout! ch:%u s:%u bitmap:%lx.\n",
+ achan->id, seqnum, achan->bitmap_seqnum[0]);
return -ETIME;
}
@@ -649,7 +649,7 @@ static int acpm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, acpm);
- return 0;
+ return devm_of_platform_populate(dev);
}
/**
@@ -677,43 +677,30 @@ static void devm_acpm_release(struct device *dev, void *res)
}
/**
- * acpm_get_by_phandle() - get the ACPM handle using DT phandle.
- * @dev: device pointer requesting ACPM handle.
- * @property: property name containing phandle on ACPM node.
+ * acpm_get_by_node() - get the ACPM handle using node pointer.
+ * @dev: device pointer requesting ACPM handle.
+ * @np: ACPM device tree node.
*
* Return: pointer to handle on success, ERR_PTR(-errno) otherwise.
*/
-static const struct acpm_handle *acpm_get_by_phandle(struct device *dev,
- const char *property)
+static const struct acpm_handle *acpm_get_by_node(struct device *dev,
+ struct device_node *np)
{
struct platform_device *pdev;
- struct device_node *acpm_np;
struct device_link *link;
struct acpm_info *acpm;
- acpm_np = of_parse_phandle(dev->of_node, property, 0);
- if (!acpm_np)
- return ERR_PTR(-ENODEV);
-
- pdev = of_find_device_by_node(acpm_np);
- if (!pdev) {
- dev_err(dev, "Cannot find device node %s\n", acpm_np->name);
- of_node_put(acpm_np);
+ pdev = of_find_device_by_node(np);
+ if (!pdev)
return ERR_PTR(-EPROBE_DEFER);
- }
-
- of_node_put(acpm_np);
acpm = platform_get_drvdata(pdev);
if (!acpm) {
- dev_err(dev, "Cannot get drvdata from %s\n",
- dev_name(&pdev->dev));
platform_device_put(pdev);
return ERR_PTR(-EPROBE_DEFER);
}
if (!try_module_get(pdev->dev.driver->owner)) {
- dev_err(dev, "Cannot get module reference.\n");
platform_device_put(pdev);
return ERR_PTR(-EPROBE_DEFER);
}
@@ -732,14 +719,14 @@ static const struct acpm_handle *acpm_get_by_phandle(struct device *dev,
}
/**
- * devm_acpm_get_by_phandle() - managed get handle using phandle.
- * @dev: device pointer requesting ACPM handle.
- * @property: property name containing phandle on ACPM node.
+ * devm_acpm_get_by_node() - managed get handle using node pointer.
+ * @dev: device pointer requesting ACPM handle.
+ * @np: ACPM device tree node.
*
* Return: pointer to handle on success, ERR_PTR(-errno) otherwise.
*/
-const struct acpm_handle *devm_acpm_get_by_phandle(struct device *dev,
- const char *property)
+const struct acpm_handle *devm_acpm_get_by_node(struct device *dev,
+ struct device_node *np)
{
const struct acpm_handle **ptr, *handle;
@@ -747,7 +734,7 @@ const struct acpm_handle *devm_acpm_get_by_phandle(struct device *dev,
if (!ptr)
return ERR_PTR(-ENOMEM);
- handle = acpm_get_by_phandle(dev, property);
+ handle = acpm_get_by_node(dev, np);
if (!IS_ERR(handle)) {
*ptr = handle;
devres_add(dev, ptr);
@@ -757,6 +744,7 @@ const struct acpm_handle *devm_acpm_get_by_phandle(struct device *dev,
return handle;
}
+EXPORT_SYMBOL_GPL(devm_acpm_get_by_node);
static const struct acpm_match_data acpm_gs101 = {
.initdata_base = ACPM_GS101_INITDATA_BASE,
diff --git a/drivers/firmware/smccc/kvm_guest.c b/drivers/firmware/smccc/kvm_guest.c
index a123c05cbc9e..49e1de83d2e8 100644
--- a/drivers/firmware/smccc/kvm_guest.c
+++ b/drivers/firmware/smccc/kvm_guest.c
@@ -17,17 +17,11 @@ static DECLARE_BITMAP(__kvm_arm_hyp_services, ARM_SMCCC_KVM_NUM_FUNCS) __ro_afte
void __init kvm_init_hyp_services(void)
{
+ uuid_t kvm_uuid = ARM_SMCCC_VENDOR_HYP_UID_KVM;
struct arm_smccc_res res;
u32 val[4];
- if (arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_HVC)
- return;
-
- arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, &res);
- if (res.a0 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0 ||
- res.a1 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1 ||
- res.a2 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2 ||
- res.a3 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3)
+ if (!arm_smccc_hypervisor_has_uuid(&kvm_uuid))
return;
memset(&res, 0, sizeof(res));
diff --git a/drivers/firmware/smccc/smccc.c b/drivers/firmware/smccc/smccc.c
index a74600d9f2d7..cd65b434dc6e 100644
--- a/drivers/firmware/smccc/smccc.c
+++ b/drivers/firmware/smccc/smccc.c
@@ -67,6 +67,23 @@ s32 arm_smccc_get_soc_id_revision(void)
}
EXPORT_SYMBOL_GPL(arm_smccc_get_soc_id_revision);
+bool arm_smccc_hypervisor_has_uuid(const uuid_t *hyp_uuid)
+{
+ struct arm_smccc_res res = {};
+ uuid_t uuid;
+
+ if (arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_HVC)
+ return false;
+
+ arm_smccc_1_1_hvc(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, &res);
+ if (res.a0 == SMCCC_RET_NOT_SUPPORTED)
+ return false;
+
+ uuid = smccc_res_to_uuid(res.a0, res.a1, res.a2, res.a3);
+ return uuid_equal(&uuid, hyp_uuid);
+}
+EXPORT_SYMBOL_GPL(arm_smccc_hypervisor_has_uuid);
+
static int __init smccc_devices_init(void)
{
struct platform_device *pdev;
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 806a975fff22..ae5fd1936ad3 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -2,7 +2,7 @@
/*
* Texas Instruments System Control Interface Protocol Driver
*
- * Copyright (C) 2015-2024 Texas Instruments Incorporated - https://www.ti.com/
+ * Copyright (C) 2015-2025 Texas Instruments Incorporated - https://www.ti.com/
* Nishanth Menon
*/
@@ -3670,6 +3670,7 @@ static int __maybe_unused ti_sci_suspend(struct device *dev)
struct ti_sci_info *info = dev_get_drvdata(dev);
struct device *cpu_dev, *cpu_dev_max = NULL;
s32 val, cpu_lat = 0;
+ u16 cpu_lat_ms;
int i, ret;
if (info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED) {
@@ -3682,9 +3683,16 @@ static int __maybe_unused ti_sci_suspend(struct device *dev)
}
}
if (cpu_dev_max) {
- dev_dbg(cpu_dev_max, "%s: sending max CPU latency=%u\n", __func__, cpu_lat);
+ /*
+ * PM QoS latency unit is usecs, device manager uses msecs.
+ * Convert to msecs and round down for device manager.
+ */
+ cpu_lat_ms = cpu_lat / USEC_PER_MSEC;
+ dev_dbg(cpu_dev_max, "%s: sending max CPU latency=%u ms\n", __func__,
+ cpu_lat_ms);
ret = ti_sci_cmd_set_latency_constraint(&info->handle,
- cpu_lat, TISCI_MSG_CONSTRAINT_SET);
+ cpu_lat_ms,
+ TISCI_MSG_CONSTRAINT_SET);
if (ret)
return ret;
}
diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c
index 47fe6261f5a3..1eac9948148f 100644
--- a/drivers/firmware/turris-mox-rwtm.c
+++ b/drivers/firmware/turris-mox-rwtm.c
@@ -2,29 +2,31 @@
/*
* Turris Mox rWTM firmware driver
*
- * Copyright (C) 2019, 2024 Marek Behún <kabel@kernel.org>
+ * Copyright (C) 2019, 2024, 2025 Marek Behún <kabel@kernel.org>
*/
#include <crypto/sha2.h>
#include <linux/align.h>
#include <linux/armada-37xx-rwtm-mailbox.h>
+#include <linux/cleanup.h>
#include <linux/completion.h>
#include <linux/container_of.h>
-#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
-#include <linux/fs.h>
#include <linux/hw_random.h>
#include <linux/if_ether.h>
+#include <linux/key.h>
#include <linux/kobject.h>
#include <linux/mailbox_client.h>
+#include <linux/math.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/sysfs.h>
+#include <linux/turris-signing-key.h>
#include <linux/types.h>
#define DRIVER_NAME "turris-mox-rwtm"
@@ -37,10 +39,13 @@
* https://gitlab.labs.nic.cz/turris/mox-boot-builder/tree/master/wtmi.
*/
-#define MOX_ECC_NUMBER_WORDS 17
-#define MOX_ECC_NUMBER_LEN (MOX_ECC_NUMBER_WORDS * sizeof(u32))
-
-#define MOX_ECC_SIGNATURE_WORDS (2 * MOX_ECC_NUMBER_WORDS)
+enum {
+ MOX_ECC_NUM_BITS = 521,
+ MOX_ECC_NUM_LEN = DIV_ROUND_UP(MOX_ECC_NUM_BITS, 8),
+ MOX_ECC_NUM_WORDS = DIV_ROUND_UP(MOX_ECC_NUM_BITS, 32),
+ MOX_ECC_SIG_LEN = 2 * MOX_ECC_NUM_LEN,
+ MOX_ECC_PUBKEY_LEN = 1 + MOX_ECC_NUM_LEN,
+};
#define MBOX_STS_SUCCESS (0 << 30)
#define MBOX_STS_FAIL (1 << 30)
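
The enum values work out as follows: MOX_ECC_NUM_LEN = DIV_ROUND_UP(521, 8) = 66 bytes and MOX_ECC_NUM_WORDS = DIV_ROUND_UP(521, 32) = 17 words (matching the old MOX_ECC_NUMBER_WORDS), hence MOX_ECC_SIG_LEN = 2 * 66 = 132 bytes and MOX_ECC_PUBKEY_LEN = 1 + 66 = 67 bytes. Note that 17 words occupy 68 bytes, two more than the 66-byte number, which is why mox_ecc_number_to_bin() below skips the two always-zero leading bytes with (void *)tmp + 2.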
@@ -77,10 +82,7 @@ enum mbox_cmd {
* @ram_size: RAM size of the device
* @mac_address1: first MAC address of the device
* @mac_address2: second MAC address of the device
- * @has_pubkey: whether board ECDSA public key is present
* @pubkey: board ECDSA public key
- * @last_sig: last ECDSA signature generated with board ECDSA private key
- * @last_sig_done: whether the last ECDSA signing is complete
*/
struct mox_rwtm {
struct mbox_client mbox_client;
@@ -100,18 +102,8 @@ struct mox_rwtm {
int board_version, ram_size;
u8 mac_address1[ETH_ALEN], mac_address2[ETH_ALEN];
- bool has_pubkey;
- u8 pubkey[135];
-
-#ifdef CONFIG_DEBUG_FS
- /*
- * Signature process. This is currently done via debugfs, because it
- * does not conform to the sysfs standard "one file per attribute".
- * It should be rewritten via crypto API once akcipher API is available
- * from userspace.
- */
- u32 last_sig[MOX_ECC_SIGNATURE_WORDS];
- bool last_sig_done;
+#ifdef CONFIG_TURRIS_MOX_RWTM_KEYCTL
+ u8 pubkey[MOX_ECC_PUBKEY_LEN];
#endif
};
@@ -120,24 +112,23 @@ static inline struct device *rwtm_dev(struct mox_rwtm *rwtm)
return rwtm->mbox_client.dev;
}
-#define MOX_ATTR_RO(name, format, cat) \
+#define MOX_ATTR_RO(name, format) \
static ssize_t \
name##_show(struct device *dev, struct device_attribute *a, \
char *buf) \
{ \
struct mox_rwtm *rwtm = dev_get_drvdata(dev); \
- if (!rwtm->has_##cat) \
+ if (!rwtm->has_board_info) \
return -ENODATA; \
return sysfs_emit(buf, format, rwtm->name); \
} \
static DEVICE_ATTR_RO(name)
-MOX_ATTR_RO(serial_number, "%016llX\n", board_info);
-MOX_ATTR_RO(board_version, "%i\n", board_info);
-MOX_ATTR_RO(ram_size, "%i\n", board_info);
-MOX_ATTR_RO(mac_address1, "%pM\n", board_info);
-MOX_ATTR_RO(mac_address2, "%pM\n", board_info);
-MOX_ATTR_RO(pubkey, "%s\n", pubkey);
+MOX_ATTR_RO(serial_number, "%016llX\n");
+MOX_ATTR_RO(board_version, "%i\n");
+MOX_ATTR_RO(ram_size, "%i\n");
+MOX_ATTR_RO(mac_address1, "%pM\n");
+MOX_ATTR_RO(mac_address2, "%pM\n");
static struct attribute *turris_mox_rwtm_attrs[] = {
&dev_attr_serial_number.attr,
@@ -145,7 +136,6 @@ static struct attribute *turris_mox_rwtm_attrs[] = {
&dev_attr_ram_size.attr,
&dev_attr_mac_address1.attr,
&dev_attr_mac_address2.attr,
- &dev_attr_pubkey.attr,
NULL
};
ATTRIBUTE_GROUPS(turris_mox_rwtm);
@@ -247,24 +237,6 @@ static int mox_get_board_info(struct mox_rwtm *rwtm)
pr_info(" burned RAM size %i MiB\n", rwtm->ram_size);
}
- ret = mox_rwtm_exec(rwtm, MBOX_CMD_ECDSA_PUB_KEY, NULL, false);
- if (ret == -ENODATA) {
- dev_warn(dev, "Board has no public key burned!\n");
- } else if (ret == -EOPNOTSUPP) {
- dev_notice(dev,
- "Firmware does not support the ECDSA_PUB_KEY command\n");
- } else if (ret < 0) {
- return ret;
- } else {
- u32 *s = reply->status;
-
- rwtm->has_pubkey = true;
- sprintf(rwtm->pubkey,
- "%06x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x",
- ret, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7],
- s[8], s[9], s[10], s[11], s[12], s[13], s[14], s[15]);
- }
-
return 0;
}
@@ -306,127 +278,139 @@ unlock_mutex:
return ret;
}
-#ifdef CONFIG_DEBUG_FS
-static int rwtm_debug_open(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
+#ifdef CONFIG_TURRIS_MOX_RWTM_KEYCTL
- return nonseekable_open(inode, file);
-}
-
-static ssize_t do_sign_read(struct file *file, char __user *buf, size_t len,
- loff_t *ppos)
+static void mox_ecc_number_to_bin(void *dst, const u32 *src)
{
- struct mox_rwtm *rwtm = file->private_data;
- ssize_t ret;
+ __be32 tmp[MOX_ECC_NUM_WORDS];
- /* only allow one read, of whole signature, from position 0 */
- if (*ppos != 0)
- return 0;
+ cpu_to_be32_array(tmp, src, MOX_ECC_NUM_WORDS);
- if (len < sizeof(rwtm->last_sig))
- return -EINVAL;
+ memcpy(dst, (void *)tmp + 2, MOX_ECC_NUM_LEN);
+}
- if (!rwtm->last_sig_done)
- return -ENODATA;
+static void mox_ecc_public_key_to_bin(void *dst, u32 src_first,
+ const u32 *src_rest)
+{
+ __be32 tmp[MOX_ECC_NUM_WORDS - 1];
+ u8 *p = dst;
- ret = simple_read_from_buffer(buf, len, ppos, rwtm->last_sig,
- sizeof(rwtm->last_sig));
- rwtm->last_sig_done = false;
+ /* take 3 bytes from the first word */
+ *p++ = src_first >> 16;
+ *p++ = src_first >> 8;
+ *p++ = src_first;
- return ret;
+ /* take the rest of the words */
+ cpu_to_be32_array(tmp, src_rest, MOX_ECC_NUM_WORDS - 1);
+ memcpy(p, tmp, sizeof(tmp));
}
-static ssize_t do_sign_write(struct file *file, const char __user *buf,
- size_t len, loff_t *ppos)
+static int mox_rwtm_sign(const struct key *key, const void *data, void *signature)
{
- struct mox_rwtm *rwtm = file->private_data;
- struct armada_37xx_rwtm_tx_msg msg;
- loff_t dummy = 0;
- ssize_t ret;
-
- if (len != SHA512_DIGEST_SIZE)
- return -EINVAL;
-
- /* if last result is not zero user has not read that information yet */
- if (rwtm->last_sig_done)
- return -EBUSY;
+ struct mox_rwtm *rwtm = dev_get_drvdata(turris_signing_key_get_dev(key));
+ struct armada_37xx_rwtm_tx_msg msg = {};
+ u32 offset_r, offset_s;
+ int ret;
- if (!mutex_trylock(&rwtm->busy))
- return -EBUSY;
+ guard(mutex)(&rwtm->busy);
/*
- * Here we have to send:
- * 1. Address of the input to sign.
- * The input is an array of 17 32-bit words, the first (most
- * significat) is 0, the rest 16 words are copied from the SHA-512
- * hash given by the user and converted from BE to LE.
- * 2. Address of the buffer where ECDSA signature value R shall be
- * stored by the rWTM firmware.
- * 3. Address of the buffer where ECDSA signature value S shall be
- * stored by the rWTM firmware.
+ * For MBOX_CMD_SIGN command:
+ * args[0] - must be 1
+ * args[1] - address of message M to sign; message is a 521-bit number
+ * args[2] - address where the R part of the signature will be stored
+ * args[3] - address where the S part of the signature will be stored
+ *
+ * M, R and S are 521-bit numbers encoded as seventeen 32-bit words,
+ * most significant word first.
+ * Since the message in @data is a SHA-512 digest, the most significant
+ * word is always zero.
*/
+
+ offset_r = MOX_ECC_NUM_WORDS * sizeof(u32);
+ offset_s = 2 * MOX_ECC_NUM_WORDS * sizeof(u32);
+
memset(rwtm->buf, 0, sizeof(u32));
- ret = simple_write_to_buffer(rwtm->buf + sizeof(u32),
- SHA512_DIGEST_SIZE, &dummy, buf, len);
- if (ret < 0)
- goto unlock_mutex;
- be32_to_cpu_array(rwtm->buf, rwtm->buf, MOX_ECC_NUMBER_WORDS);
+ memcpy(rwtm->buf + sizeof(u32), data, SHA512_DIGEST_SIZE);
+ be32_to_cpu_array(rwtm->buf, rwtm->buf, MOX_ECC_NUM_WORDS);
msg.args[0] = 1;
msg.args[1] = rwtm->buf_phys;
- msg.args[2] = rwtm->buf_phys + MOX_ECC_NUMBER_LEN;
- msg.args[3] = rwtm->buf_phys + 2 * MOX_ECC_NUMBER_LEN;
+ msg.args[2] = rwtm->buf_phys + offset_r;
+ msg.args[3] = rwtm->buf_phys + offset_s;
ret = mox_rwtm_exec(rwtm, MBOX_CMD_SIGN, &msg, true);
if (ret < 0)
- goto unlock_mutex;
+ return ret;
- /*
- * Here we read the R and S values of the ECDSA signature
- * computed by the rWTM firmware and convert their words from
- * LE to BE.
- */
- memcpy(rwtm->last_sig, rwtm->buf + MOX_ECC_NUMBER_LEN,
- sizeof(rwtm->last_sig));
- cpu_to_be32_array(rwtm->last_sig, rwtm->last_sig,
- MOX_ECC_SIGNATURE_WORDS);
- rwtm->last_sig_done = true;
+ /* convert R and S parts of the signature */
+ mox_ecc_number_to_bin(signature, rwtm->buf + offset_r);
+ mox_ecc_number_to_bin(signature + MOX_ECC_NUM_LEN, rwtm->buf + offset_s);
- mutex_unlock(&rwtm->busy);
- return len;
-unlock_mutex:
- mutex_unlock(&rwtm->busy);
- return ret;
+ return 0;
}
-static const struct file_operations do_sign_fops = {
- .owner = THIS_MODULE,
- .open = rwtm_debug_open,
- .read = do_sign_read,
- .write = do_sign_write,
-};
-
-static void rwtm_debugfs_release(void *root)
+static const void *mox_rwtm_get_public_key(const struct key *key)
{
- debugfs_remove_recursive(root);
+ struct mox_rwtm *rwtm = dev_get_drvdata(turris_signing_key_get_dev(key));
+
+ return rwtm->pubkey;
}
-static void rwtm_register_debugfs(struct mox_rwtm *rwtm)
+static const struct turris_signing_key_subtype mox_signing_key_subtype = {
+ .key_size = MOX_ECC_NUM_BITS,
+ .data_size = SHA512_DIGEST_SIZE,
+ .sig_size = MOX_ECC_SIG_LEN,
+ .public_key_size = MOX_ECC_PUBKEY_LEN,
+ .hash_algo = "sha512",
+ .get_public_key = mox_rwtm_get_public_key,
+ .sign = mox_rwtm_sign,
+};
+
+static int mox_register_signing_key(struct mox_rwtm *rwtm)
{
- struct dentry *root;
+ struct armada_37xx_rwtm_rx_msg *reply = &rwtm->reply;
+ struct device *dev = rwtm_dev(rwtm);
+ int ret;
- root = debugfs_create_dir("turris-mox-rwtm", NULL);
+ ret = mox_rwtm_exec(rwtm, MBOX_CMD_ECDSA_PUB_KEY, NULL, false);
+ if (ret == -ENODATA) {
+ dev_warn(dev, "Board has no public key burned!\n");
+ } else if (ret == -EOPNOTSUPP) {
+ dev_notice(dev,
+ "Firmware does not support the ECDSA_PUB_KEY command\n");
+ } else if (ret < 0) {
+ return ret;
+ } else {
+ char sn[17] = "unknown";
+ char desc[46];
+
+ if (rwtm->has_board_info)
+ sprintf(sn, "%016llX", rwtm->serial_number);
+
+ sprintf(desc, "Turris MOX SN %s rWTM ECDSA key", sn);
- debugfs_create_file_unsafe("do_sign", 0600, root, rwtm, &do_sign_fops);
+ mox_ecc_public_key_to_bin(rwtm->pubkey, ret, reply->status);
- devm_add_action_or_reset(rwtm_dev(rwtm), rwtm_debugfs_release, root);
+ ret = devm_turris_signing_key_create(dev,
+ &mox_signing_key_subtype,
+ desc);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Cannot create signing key\n");
+ }
+
+ return 0;
}
-#else
-static inline void rwtm_register_debugfs(struct mox_rwtm *rwtm)
+
+#else /* CONFIG_TURRIS_MOX_RWTM_KEYCTL */
+
+static inline int mox_register_signing_key(struct mox_rwtm *rwtm)
{
+ return 0;
}
-#endif
+
+#endif /* !CONFIG_TURRIS_MOX_RWTM_KEYCTL */
static void rwtm_devm_mbox_release(void *mbox)
{
@@ -477,6 +461,10 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev)
if (ret < 0)
dev_warn(dev, "Cannot read board information: %i\n", ret);
+ ret = mox_register_signing_key(rwtm);
+ if (ret < 0)
+ return ret;
+
ret = check_get_random_support(rwtm);
if (ret < 0) {
dev_notice(dev,
@@ -491,8 +479,6 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "Cannot register HWRNG!\n");
- rwtm_register_debugfs(rwtm);
-
dev_info(dev, "HWRNG successfully registered\n");
/*
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index 76e2801619f0..c33bd3d83069 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -100,7 +100,7 @@ int remap_io_mapping(struct vm_area_struct *vma,
GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
- /* We rely on prevalidation of the io-mapping to skip track_pfn(). */
+ /* We rely on prevalidation of the io-mapping to skip pfnmap tracking. */
r.mm = vma->vm_mm;
r.pfn = pfn;
r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |
@@ -140,7 +140,7 @@ int remap_io_sg(struct vm_area_struct *vma,
};
int err;
- /* We rely on prevalidation of the io-mapping to skip track_pfn(). */
+ /* We rely on prevalidation of the io-mapping to skip pfnmap tracking. */
GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
while (offset >= r.sgt.max >> PAGE_SHIFT) {
diff --git a/drivers/gpu/drm/i915/i915_reg_defs.h b/drivers/gpu/drm/i915/i915_reg_defs.h
index 94a8f902689e..bfe98cb9a038 100644
--- a/drivers/gpu/drm/i915/i915_reg_defs.h
+++ b/drivers/gpu/drm/i915/i915_reg_defs.h
@@ -9,76 +9,19 @@
#include <linux/bitfield.h>
#include <linux/bits.h>
-/**
- * REG_BIT() - Prepare a u32 bit value
- * @__n: 0-based bit number
- *
- * Local wrapper for BIT() to force u32, with compile time checks.
- *
- * @return: Value with bit @__n set.
- */
-#define REG_BIT(__n) \
- ((u32)(BIT(__n) + \
- BUILD_BUG_ON_ZERO(__is_constexpr(__n) && \
- ((__n) < 0 || (__n) > 31))))
-
-/**
- * REG_BIT8() - Prepare a u8 bit value
- * @__n: 0-based bit number
- *
- * Local wrapper for BIT() to force u8, with compile time checks.
- *
- * @return: Value with bit @__n set.
- */
-#define REG_BIT8(__n) \
- ((u8)(BIT(__n) + \
- BUILD_BUG_ON_ZERO(__is_constexpr(__n) && \
- ((__n) < 0 || (__n) > 7))))
-
-/**
- * REG_GENMASK() - Prepare a continuous u32 bitmask
- * @__high: 0-based high bit
- * @__low: 0-based low bit
- *
- * Local wrapper for GENMASK() to force u32, with compile time checks.
- *
- * @return: Continuous bitmask from @__high to @__low, inclusive.
- */
-#define REG_GENMASK(__high, __low) \
- ((u32)(GENMASK(__high, __low) + \
- BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \
- __is_constexpr(__low) && \
- ((__low) < 0 || (__high) > 31 || (__low) > (__high)))))
-
-/**
- * REG_GENMASK64() - Prepare a continuous u64 bitmask
- * @__high: 0-based high bit
- * @__low: 0-based low bit
- *
- * Local wrapper for GENMASK_ULL() to force u64, with compile time checks.
- *
- * @return: Continuous bitmask from @__high to @__low, inclusive.
+/*
+ * Wrappers over the generic fixed width BIT_U*() and GENMASK_U*()
+ * implementations, kept for compatibility with the previous implementation.
*/
-#define REG_GENMASK64(__high, __low) \
- ((u64)(GENMASK_ULL(__high, __low) + \
- BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \
- __is_constexpr(__low) && \
- ((__low) < 0 || (__high) > 63 || (__low) > (__high)))))
+#define REG_GENMASK(high, low) GENMASK_U32(high, low)
+#define REG_GENMASK64(high, low) GENMASK_U64(high, low)
+#define REG_GENMASK16(high, low) GENMASK_U16(high, low)
+#define REG_GENMASK8(high, low) GENMASK_U8(high, low)
-/**
- * REG_GENMASK8() - Prepare a continuous u8 bitmask
- * @__high: 0-based high bit
- * @__low: 0-based low bit
- *
- * Local wrapper for GENMASK() to force u8, with compile time checks.
- *
- * @return: Continuous bitmask from @__high to @__low, inclusive.
- */
-#define REG_GENMASK8(__high, __low) \
- ((u8)(GENMASK(__high, __low) + \
- BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \
- __is_constexpr(__low) && \
- ((__low) < 0 || (__high) > 7 || (__low) > (__high)))))
+#define REG_BIT(n) BIT_U32(n)
+#define REG_BIT64(n) BIT_U64(n)
+#define REG_BIT16(n) BIT_U16(n)
+#define REG_BIT8(n) BIT_U8(n)
/*
* Local integer constant expression version of is_power_of_2().
@@ -143,35 +86,6 @@
*/
#define REG_FIELD_GET64(__mask, __val) ((u64)FIELD_GET(__mask, __val))
-/**
- * REG_BIT16() - Prepare a u16 bit value
- * @__n: 0-based bit number
- *
- * Local wrapper for BIT() to force u16, with compile time
- * checks.
- *
- * @return: Value with bit @__n set.
- */
-#define REG_BIT16(__n) \
- ((u16)(BIT(__n) + \
- BUILD_BUG_ON_ZERO(__is_constexpr(__n) && \
- ((__n) < 0 || (__n) > 15))))
-
-/**
- * REG_GENMASK16() - Prepare a continuous u8 bitmask
- * @__high: 0-based high bit
- * @__low: 0-based low bit
- *
- * Local wrapper for GENMASK() to force u16, with compile time
- * checks.
- *
- * @return: Continuous bitmask from @__high to @__low, inclusive.
- */
-#define REG_GENMASK16(__high, __low) \
- ((u16)(GENMASK(__high, __low) + \
- BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \
- __is_constexpr(__low) && \
- ((__low) < 0 || (__high) > 15 || (__low) > (__high)))))
/**
* REG_FIELD_PREP16() - Prepare a u16 bitfield value
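
A short sketch of what the consolidated wrappers evaluate to, assuming the generic fixed-width BIT_U*()/GENMASK_U*() semantics:

	u32 mask = REG_GENMASK(15, 8);	/* 0x0000ff00, typed u32 */
	u16 bit  = REG_BIT16(15);	/* 0x8000, typed u16 */

Out-of-range arguments such as REG_GENMASK8(8, 0) still fail at compile time, since the generic macros retain the width checks that the local BUILD_BUG_ON_ZERO() versions performed.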
diff --git a/drivers/gpu/drm/panel/panel-samsung-sofef00.c b/drivers/gpu/drm/panel/panel-samsung-sofef00.c
index 210a25afe82b..d92ae6b6100f 100644
--- a/drivers/gpu/drm/panel/panel-samsung-sofef00.c
+++ b/drivers/gpu/drm/panel/panel-samsung-sofef00.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2020 Caleb Connolly <caleb@connolly.tech>
+/* Copyright (c) 2020 Casey Connolly <casey.connolly@linaro.org>
* Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree:
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*/
@@ -260,6 +260,6 @@ static struct mipi_dsi_driver sofef00_panel_driver = {
module_mipi_dsi_driver(sofef00_panel_driver);
-MODULE_AUTHOR("Caleb Connolly <caleb@connolly.tech>");
+MODULE_AUTHOR("Casey Connolly <casey.connolly@linaro.org>");
MODULE_DESCRIPTION("DRM driver for Samsung AMOLED DSI panels found in OnePlus 6/6T phones");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
index 4dbf8b88f264..11d460d2ea19 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
@@ -86,11 +86,7 @@ struct td028ttec1_panel {
#define to_td028ttec1_device(p) container_of(p, struct td028ttec1_panel, panel)
-/*
- * noinline_for_stack so we don't get multiple copies of tx_buf
- * on the stack in case of gcc-plugin-structleak
- */
-static int noinline_for_stack
+static int
jbt_ret_write_0(struct td028ttec1_panel *lcd, u8 reg, int *err)
{
struct spi_device *spi = lcd->spi;
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index a503252702b7..43859fc75747 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -151,6 +151,7 @@ config HID_APPLEIR
config HID_APPLETB_BL
tristate "Apple Touch Bar Backlight"
depends on BACKLIGHT_CLASS_DEVICE
+ depends on X86 || COMPILE_TEST
help
Say Y here if you want support for the backlight of Touch Bars on x86
MacBook Pros.
@@ -163,6 +164,7 @@ config HID_APPLETB_KBD
depends on USB_HID
depends on BACKLIGHT_CLASS_DEVICE
depends on INPUT
+ depends on X86 || COMPILE_TEST
select INPUT_SPARSEKMAP
select HID_APPLETB_BL
help
diff --git a/drivers/hid/hid-appletb-kbd.c b/drivers/hid/hid-appletb-kbd.c
index 029ccbaa1d12..ef51b2c06872 100644
--- a/drivers/hid/hid-appletb-kbd.c
+++ b/drivers/hid/hid-appletb-kbd.c
@@ -172,7 +172,8 @@ static void appletb_inactivity_timer(struct timer_list *t)
if (!kbd->has_dimmed) {
backlight_device_set_brightness(kbd->backlight_dev, 1);
kbd->has_dimmed = true;
- mod_timer(&kbd->inactivity_timer, jiffies + msecs_to_jiffies(appletb_tb_idle_timeout * 1000));
+ mod_timer(&kbd->inactivity_timer,
+ jiffies + secs_to_jiffies(appletb_tb_idle_timeout));
} else if (!kbd->has_turned_off) {
backlight_device_set_brightness(kbd->backlight_dev, 0);
kbd->has_turned_off = true;
@@ -188,7 +189,8 @@ static void reset_inactivity_timer(struct appletb_kbd *kbd)
kbd->has_dimmed = false;
kbd->has_turned_off = false;
}
- mod_timer(&kbd->inactivity_timer, jiffies + msecs_to_jiffies(appletb_tb_dim_timeout * 1000));
+ mod_timer(&kbd->inactivity_timer,
+ jiffies + secs_to_jiffies(appletb_tb_dim_timeout));
}
}
@@ -407,7 +409,8 @@ static int appletb_kbd_probe(struct hid_device *hdev, const struct hid_device_id
} else {
backlight_device_set_brightness(kbd->backlight_dev, 2);
timer_setup(&kbd->inactivity_timer, appletb_inactivity_timer, 0);
- mod_timer(&kbd->inactivity_timer, jiffies + msecs_to_jiffies(appletb_tb_dim_timeout * 1000));
+ mod_timer(&kbd->inactivity_timer,
+ jiffies + secs_to_jiffies(appletb_tb_dim_timeout));
}
kbd->inp_handler.event = appletb_kbd_inp_event;
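
The three conversions above are behavior-preserving; secs_to_jiffies() simply avoids the intermediate millisecond multiply. Its assumed shape, per include/linux/jiffies.h:

	#define secs_to_jiffies(_secs) (unsigned long)((_secs) * HZ)

so secs_to_jiffies(t) equals msecs_to_jiffies(t * 1000) for the whole-second timeouts used here.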
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 4741ff626771..b348d0464314 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2396,6 +2396,9 @@ int hid_hw_open(struct hid_device *hdev)
ret = hdev->ll_driver->open(hdev);
if (ret)
hdev->ll_open_count--;
+
+ if (hdev->driver->on_hid_hw_open)
+ hdev->driver->on_hid_hw_open(hdev);
}
mutex_unlock(&hdev->ll_open_lock);
@@ -2415,8 +2418,12 @@ EXPORT_SYMBOL_GPL(hid_hw_open);
void hid_hw_close(struct hid_device *hdev)
{
mutex_lock(&hdev->ll_open_lock);
- if (!--hdev->ll_open_count)
+ if (!--hdev->ll_open_count) {
hdev->ll_driver->close(hdev);
+
+ if (hdev->driver->on_hid_hw_close)
+ hdev->driver->on_hid_hw_close(hdev);
+ }
mutex_unlock(&hdev->ll_open_lock);
}
EXPORT_SYMBOL_GPL(hid_hw_close);
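
The new callbacks are optional hid_driver members, invoked when the low-level transport is opened for the first time and closed for the last time (the hid-multitouch hunk further below is the in-tree user). A minimal sketch:

	static void my_on_open(struct hid_device *hdev)
	{
		/* first opener: switch the device to full reporting */
	}

	static void my_on_close(struct hid_device *hdev)
	{
		/* last opener gone: drop back into a low-power mode */
	}

	static struct hid_driver my_driver = {
		.name		 = "my-hid",
		.on_hid_hw_open	 = my_on_open,
		.on_hid_hw_close = my_on_close,
	};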
diff --git a/drivers/hid/hid-corsair-void.c b/drivers/hid/hid-corsair-void.c
index afbd67aa9719..fee134a7eba3 100644
--- a/drivers/hid/hid-corsair-void.c
+++ b/drivers/hid/hid-corsair-void.c
@@ -507,7 +507,7 @@ static void corsair_void_status_work_handler(struct work_struct *work)
struct delayed_work *delayed_work;
int battery_ret;
- delayed_work = container_of(work, struct delayed_work, work);
+ delayed_work = to_delayed_work(work);
drvdata = container_of(delayed_work, struct corsair_void_drvdata,
delayed_status_work);
@@ -525,7 +525,7 @@ static void corsair_void_firmware_work_handler(struct work_struct *work)
struct delayed_work *delayed_work;
int firmware_ret;
- delayed_work = container_of(work, struct delayed_work, work);
+ delayed_work = to_delayed_work(work);
drvdata = container_of(delayed_work, struct corsair_void_drvdata,
delayed_firmware_work);
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index f4c8d981aa0a..234fa82eab07 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -17,11 +17,13 @@
*/
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/gpio/driver.h>
#include <linux/hid.h>
#include <linux/hidraw.h>
#include <linux/i2c.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/nls.h>
#include <linux/string_choices.h>
#include <linux/usb/ch9.h>
@@ -185,7 +187,7 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
u8 *buf = dev->in_out_buffer;
int ret;
- mutex_lock(&dev->lock);
+ guard(mutex)(&dev->lock);
ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
@@ -194,7 +196,7 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
hid_err(hdev, "error requesting GPIO config: %d\n", ret);
if (ret >= 0)
ret = -EIO;
- goto exit;
+ return ret;
}
buf[1] &= ~BIT(offset);
@@ -207,25 +209,19 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
hid_err(hdev, "error setting GPIO config: %d\n", ret);
if (ret >= 0)
ret = -EIO;
- goto exit;
+ return ret;
}
- ret = 0;
-
-exit:
- mutex_unlock(&dev->lock);
- return ret;
+ return 0;
}
-static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int cp2112_gpio_set_unlocked(struct cp2112_device *dev,
+ unsigned int offset, int value)
{
- struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
u8 *buf = dev->in_out_buffer;
int ret;
- mutex_lock(&dev->lock);
-
buf[0] = CP2112_GPIO_SET;
buf[1] = value ? CP2112_GPIO_ALL_GPIO_MASK : 0;
buf[2] = BIT(offset);
@@ -236,7 +232,17 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
if (ret < 0)
hid_err(hdev, "error setting GPIO values: %d\n", ret);
- mutex_unlock(&dev->lock);
+ return ret;
+}
+
+static int cp2112_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct cp2112_device *dev = gpiochip_get_data(chip);
+
+ guard(mutex)(&dev->lock);
+
+ return cp2112_gpio_set_unlocked(dev, offset, value);
}
static int cp2112_gpio_get_all(struct gpio_chip *chip)
@@ -246,23 +252,17 @@ static int cp2112_gpio_get_all(struct gpio_chip *chip)
u8 *buf = dev->in_out_buffer;
int ret;
- mutex_lock(&dev->lock);
+ guard(mutex)(&dev->lock);
ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf,
CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT,
HID_REQ_GET_REPORT);
if (ret != CP2112_GPIO_GET_LENGTH) {
hid_err(hdev, "error requesting GPIO values: %d\n", ret);
- ret = ret < 0 ? ret : -EIO;
- goto exit;
+ return ret < 0 ? ret : -EIO;
}
- ret = buf[1];
-
-exit:
- mutex_unlock(&dev->lock);
-
- return ret;
+ return buf[1];
}
static int cp2112_gpio_get(struct gpio_chip *chip, unsigned int offset)
@@ -284,14 +284,14 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
u8 *buf = dev->in_out_buffer;
int ret;
- mutex_lock(&dev->lock);
+ guard(mutex)(&dev->lock);
ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
HID_REQ_GET_REPORT);
if (ret != CP2112_GPIO_CONFIG_LENGTH) {
hid_err(hdev, "error requesting GPIO config: %d\n", ret);
- goto fail;
+ return ret < 0 ? ret : -EIO;
}
buf[1] |= 1 << offset;
@@ -302,22 +302,16 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
HID_REQ_SET_REPORT);
if (ret < 0) {
hid_err(hdev, "error setting GPIO config: %d\n", ret);
- goto fail;
+ return ret;
}
- mutex_unlock(&dev->lock);
-
/*
* Set gpio value when output direction is already set,
* as specified in AN495, Rev. 0.2, cpt. 4.4
*/
- cp2112_gpio_set(chip, offset, value);
+ cp2112_gpio_set_unlocked(dev, offset, value);
return 0;
-
-fail:
- mutex_unlock(&dev->lock);
- return ret < 0 ? ret : -EIO;
}
static int cp2112_hid_get(struct hid_device *hdev, unsigned char report_number,
@@ -1205,7 +1199,11 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (!dev->in_out_buffer)
return -ENOMEM;
- mutex_init(&dev->lock);
+ ret = devm_mutex_init(&hdev->dev, &dev->lock);
+ if (ret) {
+ hid_err(hdev, "mutex init failed\n");
+ return ret;
+ }
ret = hid_parse(hdev);
if (ret) {
@@ -1290,7 +1288,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
dev->gc.label = "cp2112_gpio";
dev->gc.direction_input = cp2112_gpio_direction_input;
dev->gc.direction_output = cp2112_gpio_direction_output;
- dev->gc.set = cp2112_gpio_set;
+ dev->gc.set_rv = cp2112_gpio_set;
dev->gc.get = cp2112_gpio_get;
dev->gc.base = -1;
dev->gc.ngpio = CP2112_GPIO_MAX_GPIO;
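
The conversions lean on the scope-based lock guards from <linux/cleanup.h>: guard(mutex)(&m) takes the mutex and releases it on every function exit, which is what lets the error paths above return directly instead of jumping to an unlock label. A self-contained sketch:

	#include <linux/cleanup.h>
	#include <linux/errno.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(demo_lock);
	static int demo_value;

	static int demo_get(void)
	{
		guard(mutex)(&demo_lock);	/* mutex_lock(&demo_lock) */

		if (demo_value < 0)
			return -EINVAL;		/* unlocked automatically */

		return demo_value;		/* unlocked automatically */
	}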
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index 0fb210e40a41..9eafff0b6ea4 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -192,7 +192,7 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
goto cleanup;
input_device->report_desc_size = le16_to_cpu(
- desc->desc[0].wDescriptorLength);
+ desc->rpt_desc.wDescriptorLength);
if (input_device->report_desc_size == 0) {
input_device->dev_info_status = -EINVAL;
goto cleanup;
@@ -210,7 +210,7 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
memcpy(input_device->report_desc,
((unsigned char *)desc) + desc->bLength,
- le16_to_cpu(desc->desc[0].wDescriptorLength));
+ le16_to_cpu(desc->rpt_desc.wDescriptorLength));
/* Send the ack */
memset(&ack, 0, sizeof(struct mousevsc_prt_msg));
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 1062731315a2..e3fb4e2fe911 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -96,6 +96,7 @@
#define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304
#define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d
#define USB_DEVICE_ID_APPLE_MAGICMOUSE2 0x0269
+#define USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC 0x0323
#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD 0x030e
#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 0x0265
#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC 0x0324
diff --git a/drivers/hid/hid-kysona.c b/drivers/hid/hid-kysona.c
index d4c0406b3323..09bfe30d02cb 100644
--- a/drivers/hid/hid-kysona.c
+++ b/drivers/hid/hid-kysona.c
@@ -14,6 +14,7 @@
#define BATTERY_TIMEOUT_MS 5000
+#define ONLINE_REPORT_ID 3
#define BATTERY_REPORT_ID 4
struct kysona_drvdata {
@@ -80,11 +81,46 @@ static int kysona_battery_get_property(struct power_supply *psy,
return ret;
}
+static const char kysona_online_request[] = {
+ 0x08, ONLINE_REPORT_ID, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4a
+};
+
static const char kysona_battery_request[] = {
0x08, BATTERY_REPORT_ID, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49
};
+static int kysona_m600_fetch_online(struct hid_device *hdev)
+{
+ u8 *write_buf;
+ int ret;
+
+ /* Request online information */
+ write_buf = kmemdup(kysona_online_request, sizeof(kysona_online_request), GFP_KERNEL);
+ if (!write_buf)
+ return -ENOMEM;
+
+ ret = hid_hw_raw_request(hdev, kysona_online_request[0],
+ write_buf, sizeof(kysona_online_request),
+ HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
+ if (ret < (int)sizeof(kysona_online_request)) {
+ hid_err(hdev, "hid_hw_raw_request() failed with %d\n", ret);
+ ret = -ENODATA;
+ }
+ kfree(write_buf);
+ return ret;
+}
+
+static void kysona_fetch_online(struct hid_device *hdev)
+{
+ int ret = kysona_m600_fetch_online(hdev);
+
+ if (ret < 0)
+ hid_dbg(hdev,
+ "Online query failed (err: %d)\n", ret);
+}
+
static int kysona_m600_fetch_battery(struct hid_device *hdev)
{
u8 *write_buf;
@@ -121,6 +157,7 @@ static void kysona_battery_timer_tick(struct work_struct *work)
struct kysona_drvdata, battery_work.work);
struct hid_device *hdev = drv_data->hdev;
+ kysona_fetch_online(hdev);
kysona_fetch_battery(hdev);
schedule_delayed_work(&drv_data->battery_work,
msecs_to_jiffies(BATTERY_TIMEOUT_MS));
@@ -160,6 +197,7 @@ static int kysona_battery_probe(struct hid_device *hdev)
power_supply_powers(drv_data->battery, &hdev->dev);
INIT_DELAYED_WORK(&drv_data->battery_work, kysona_battery_timer_tick);
+ kysona_fetch_online(hdev);
kysona_fetch_battery(hdev);
schedule_delayed_work(&drv_data->battery_work,
msecs_to_jiffies(BATTERY_TIMEOUT_MS));
@@ -206,12 +244,16 @@ static int kysona_raw_event(struct hid_device *hdev,
{
struct kysona_drvdata *drv_data = hid_get_drvdata(hdev);
- if (drv_data->battery && size == sizeof(kysona_battery_request) &&
+ if (size == sizeof(kysona_online_request) &&
+ data[0] == 8 && data[1] == ONLINE_REPORT_ID) {
+ drv_data->online = data[6];
+ }
+
+ if (size == sizeof(kysona_battery_request) &&
data[0] == 8 && data[1] == BATTERY_REPORT_ID) {
drv_data->battery_capacity = data[6];
drv_data->battery_charging = data[7];
drv_data->battery_voltage = (data[8] << 8) | data[9];
- drv_data->online = true;
}
return 0;
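
One detail worth noting in kysona_m600_fetch_online(): buffers passed to hid_hw_raw_request() may be handed to DMA-capable transports, so the request bytes must live in heap memory; the const kysona_online_request array in rodata cannot be passed directly. The kmemdup()/kfree() pair around the call provides that heap copy.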
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index c0a138f21ca4..445623dd1bd6 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -823,7 +823,7 @@ static ssize_t lg4ff_alternate_modes_show(struct device *dev, struct device_attr
for (i = 0; i < LG4FF_MODE_MAX_IDX; i++) {
if (entry->wdata.alternate_modes & BIT(i)) {
/* Print tag and full name */
- count += scnprintf(buf + count, PAGE_SIZE - count, "%s: %s",
+ count += sysfs_emit_at(buf, count, "%s: %s",
lg4ff_alternate_modes[i].tag,
!lg4ff_alternate_modes[i].product_id ? entry->wdata.real_name : lg4ff_alternate_modes[i].name);
if (count >= PAGE_SIZE - 1)
@@ -832,9 +832,9 @@ static ssize_t lg4ff_alternate_modes_show(struct device *dev, struct device_attr
/* Mark the currently active mode with an asterisk */
if (lg4ff_alternate_modes[i].product_id == entry->wdata.product_id ||
(lg4ff_alternate_modes[i].product_id == 0 && entry->wdata.product_id == entry->wdata.real_product_id))
- count += scnprintf(buf + count, PAGE_SIZE - count, " *\n");
+ count += sysfs_emit_at(buf, count, " *\n");
else
- count += scnprintf(buf + count, PAGE_SIZE - count, "\n");
+ count += sysfs_emit_at(buf, count, "\n");
if (count >= PAGE_SIZE - 1)
return count;
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index adfa329e917b..d4d91e49bbe8 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -218,7 +218,8 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
int pressure = 0;
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE ||
- input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC) {
id = (tdata[6] << 2 | tdata[5] >> 6) & 0xf;
x = (tdata[1] << 28 | tdata[0] << 20) >> 20;
y = -((tdata[2] << 24 | tdata[1] << 16) >> 20);
@@ -370,7 +371,8 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
if (report_undeciphered) {
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE ||
- input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2)
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC)
input_event(input, EV_MSC, MSC_RAW, tdata[7]);
else if (input->id.product !=
USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
@@ -497,7 +499,8 @@ static int magicmouse_raw_event(struct hid_device *hdev,
}
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE ||
- input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC) {
magicmouse_emit_buttons(msc, clicks & 3);
input_report_rel(input, REL_X, x);
input_report_rel(input, REL_Y, y);
@@ -519,7 +522,8 @@ static int magicmouse_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
struct magicmouse_sc *msc = hid_get_drvdata(hdev);
- if (msc->input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 &&
+ if ((msc->input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ msc->input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC) &&
field->report->id == MOUSE2_REPORT_ID) {
/*
* magic_mouse_raw_event has done all the work. Skip hidinput.
@@ -540,7 +544,8 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
__set_bit(EV_KEY, input->evbit);
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE ||
- input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC) {
__set_bit(BTN_LEFT, input->keybit);
__set_bit(BTN_RIGHT, input->keybit);
if (emulate_3button)
@@ -625,7 +630,8 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
* inverse of the reported Y.
*/
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE ||
- input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC) {
input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0);
input_set_abs_params(input, ABS_MT_POSITION_X,
MOUSE_MIN_X, MOUSE_MAX_X, 4, 0);
@@ -741,19 +747,25 @@ static int magicmouse_enable_multitouch(struct hid_device *hdev)
int ret;
int feature_size;
- if (hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
- hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
- if (hdev->vendor == BT_VENDOR_ID_APPLE) {
+ switch (hdev->product) {
+ case USB_DEVICE_ID_APPLE_MAGICTRACKPAD2:
+ case USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC:
+ switch (hdev->vendor) {
+ case BT_VENDOR_ID_APPLE:
feature_size = sizeof(feature_mt_trackpad2_bt);
feature = feature_mt_trackpad2_bt;
- } else { /* USB_VENDOR_ID_APPLE */
+ break;
+ default: /* USB_VENDOR_ID_APPLE */
feature_size = sizeof(feature_mt_trackpad2_usb);
feature = feature_mt_trackpad2_usb;
}
- } else if (hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
+ break;
+ case USB_DEVICE_ID_APPLE_MAGICMOUSE2:
+ case USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC:
feature_size = sizeof(feature_mt_mouse2);
feature = feature_mt_mouse2;
- } else {
+ break;
+ default:
feature_size = sizeof(feature_mt);
feature = feature_mt;
}
@@ -787,6 +799,7 @@ static int magicmouse_fetch_battery(struct hid_device *hdev)
if (!hdev->battery || hdev->vendor != USB_VENDOR_ID_APPLE ||
(hdev->product != USB_DEVICE_ID_APPLE_MAGICMOUSE2 &&
+ hdev->product != USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC &&
hdev->product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
hdev->product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC))
return -1;
@@ -857,6 +870,7 @@ static int magicmouse_probe(struct hid_device *hdev,
if (id->vendor == USB_VENDOR_ID_APPLE &&
(id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC ||
((id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) &&
hdev->type != HID_TYPE_USBMOUSE)))
@@ -868,21 +882,27 @@ static int magicmouse_probe(struct hid_device *hdev,
goto err_stop_hw;
}
- if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE)
- report = hid_register_report(hdev, HID_INPUT_REPORT,
- MOUSE_REPORT_ID, 0);
- else if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2)
- report = hid_register_report(hdev, HID_INPUT_REPORT,
- MOUSE2_REPORT_ID, 0);
- else if (id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
- id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
- if (id->vendor == BT_VENDOR_ID_APPLE)
+ switch (id->product) {
+ case USB_DEVICE_ID_APPLE_MAGICMOUSE:
+ report = hid_register_report(hdev, HID_INPUT_REPORT, MOUSE_REPORT_ID, 0);
+ break;
+ case USB_DEVICE_ID_APPLE_MAGICMOUSE2:
+ case USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC:
+ report = hid_register_report(hdev, HID_INPUT_REPORT, MOUSE2_REPORT_ID, 0);
+ break;
+ case USB_DEVICE_ID_APPLE_MAGICTRACKPAD2:
+ case USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC:
+ switch (id->vendor) {
+ case BT_VENDOR_ID_APPLE:
report = hid_register_report(hdev, HID_INPUT_REPORT,
TRACKPAD2_BT_REPORT_ID, 0);
- else /* USB_VENDOR_ID_APPLE */
+ break;
+ default:
report = hid_register_report(hdev, HID_INPUT_REPORT,
TRACKPAD2_USB_REPORT_ID, 0);
- } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
+ }
+ break;
+ default: /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
report = hid_register_report(hdev, HID_INPUT_REPORT,
TRACKPAD_REPORT_ID, 0);
report = hid_register_report(hdev, HID_INPUT_REPORT,
@@ -909,7 +929,8 @@ static int magicmouse_probe(struct hid_device *hdev,
hid_err(hdev, "unable to request touch data (%d)\n", ret);
goto err_stop_hw;
}
- if (ret == -EIO && id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
+ if (ret == -EIO && (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC)) {
schedule_delayed_work(&msc->work, msecs_to_jiffies(500));
}
@@ -945,6 +966,7 @@ static const __u8 *magicmouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
*/
if (hdev->vendor == USB_VENDOR_ID_APPLE &&
(hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC ||
hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) &&
*rsize == 83 && rdesc[46] == 0x84 && rdesc[58] == 0x85) {
@@ -971,6 +993,10 @@ static const struct hid_device_id magic_mice[] = {
USB_DEVICE_ID_APPLE_MAGICMOUSE2), .driver_data = 0 },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_MAGICMOUSE2), .driver_data = 0 },
+ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC), .driver_data = 0 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC), .driver_data = 0 },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_MAGICTRACKPAD), .driver_data = 0 },
{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE,
diff --git a/drivers/hid/hid-mcp2200.c b/drivers/hid/hid-mcp2200.c
index bf57f7f6caa0..e6ea0a2140eb 100644
--- a/drivers/hid/hid-mcp2200.c
+++ b/drivers/hid/hid-mcp2200.c
@@ -127,8 +127,8 @@ static int mcp_cmd_read_all(struct mcp2200 *mcp)
return mcp->status;
}
-static void mcp_set_multiple(struct gpio_chip *gc, unsigned long *mask,
- unsigned long *bits)
+static int mcp_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
{
struct mcp2200 *mcp = gpiochip_get_data(gc);
u8 value;
@@ -150,16 +150,20 @@ static void mcp_set_multiple(struct gpio_chip *gc, unsigned long *mask,
if (status == sizeof(struct mcp_set_clear_outputs))
mcp->gpio_val = value;
+ else
+ status = -EIO;
mutex_unlock(&mcp->lock);
+
+ return status;
}
-static void mcp_set(struct gpio_chip *gc, unsigned int gpio_nr, int value)
+static int mcp_set(struct gpio_chip *gc, unsigned int gpio_nr, int value)
{
unsigned long mask = 1 << gpio_nr;
unsigned long bmap_value = value << gpio_nr;
- mcp_set_multiple(gc, &mask, &bmap_value);
+ return mcp_set_multiple(gc, &mask, &bmap_value);
}
static int mcp_get_multiple(struct gpio_chip *gc, unsigned long *mask,
@@ -263,9 +267,10 @@ static int mcp_direction_output(struct gpio_chip *gc, unsigned int gpio_nr,
bmap_value = value << gpio_nr;
ret = mcp_set_direction(gc, gpio_nr, MCP2200_DIR_OUT);
- if (!ret)
- mcp_set_multiple(gc, &mask, &bmap_value);
- return ret;
+ if (ret)
+ return ret;
+
+ return mcp_set_multiple(gc, &mask, &bmap_value);
}
static const struct gpio_chip template_chip = {
@@ -274,8 +279,8 @@ static const struct gpio_chip template_chip = {
.get_direction = mcp_get_direction,
.direction_input = mcp_direction_input,
.direction_output = mcp_direction_output,
- .set = mcp_set,
- .set_multiple = mcp_set_multiple,
+ .set_rv = mcp_set,
+ .set_multiple_rv = mcp_set_multiple,
.get = mcp_get,
.get_multiple = mcp_get_multiple,
.base = -1,
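
This is part of the tree-wide move to GPIO setters that return an error code; during the transition the int-returning variants are wired through the separate set_rv/set_multiple_rv gpio_chip members. A sketch of the expected callback shape:

	static int demo_gpio_set(struct gpio_chip *gc, unsigned int offset,
				 int value)
	{
		/* push the level to the hardware; USB/I2C expanders can fail */
		return 0;
	}

	/* in the chip template: .set_rv = demo_gpio_set, instead of .set */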
diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c
index 0f93c22a479f..6c0ac14f11a6 100644
--- a/drivers/hid/hid-mcp2221.c
+++ b/drivers/hid/hid-mcp2221.c
@@ -624,10 +624,10 @@ static int mcp_gpio_get(struct gpio_chip *gc,
return ret;
}
-static void mcp_gpio_set(struct gpio_chip *gc,
- unsigned int offset, int value)
+static int mcp_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct mcp2221 *mcp = gpiochip_get_data(gc);
+ int ret;
memset(mcp->txbuf, 0, 18);
mcp->txbuf[0] = MCP2221_GPIO_SET;
@@ -638,8 +638,10 @@ static void mcp_gpio_set(struct gpio_chip *gc,
mcp->txbuf[mcp->gp_idx] = !!value;
mutex_lock(&mcp->lock);
- mcp_send_data_req_status(mcp, mcp->txbuf, 18);
+ ret = mcp_send_data_req_status(mcp, mcp->txbuf, 18);
mutex_unlock(&mcp->lock);
+
+ return ret;
}
static int mcp_gpio_dir_set(struct mcp2221 *mcp,
@@ -1206,7 +1208,7 @@ static int mcp2221_probe(struct hid_device *hdev,
mcp->gc->direction_input = mcp_gpio_direction_input;
mcp->gc->direction_output = mcp_gpio_direction_output;
mcp->gc->get_direction = mcp_gpio_get_direction;
- mcp->gc->set = mcp_gpio_set;
+ mcp->gc->set_rv = mcp_gpio_set;
mcp->gc->get = mcp_gpio_get;
mcp->gc->ngpio = MCP_NGPIO;
mcp->gc->base = -1;
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 7ac8e16e6158..ded0fef7d8c7 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1887,6 +1887,16 @@ static void mt_remove(struct hid_device *hdev)
hid_hw_stop(hdev);
}
+static void mt_on_hid_hw_open(struct hid_device *hdev)
+{
+ mt_set_modes(hdev, HID_LATENCY_NORMAL, TOUCHPAD_REPORT_ALL);
+}
+
+static void mt_on_hid_hw_close(struct hid_device *hdev)
+{
+ mt_set_modes(hdev, HID_LATENCY_HIGH, TOUCHPAD_REPORT_NONE);
+}
+
/*
* This list contains only:
* - VID/PID of products not working with the default multitouch handling
@@ -2354,5 +2364,7 @@ static struct hid_driver mt_driver = {
.suspend = pm_ptr(mt_suspend),
.reset_resume = pm_ptr(mt_reset_resume),
.resume = pm_ptr(mt_resume),
+ .on_hid_hw_open = mt_on_hid_hw_open,
+ .on_hid_hw_close = mt_on_hid_hw_close,
};
module_hid_driver(mt_driver);
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 0731473cc9b1..7fefeb413ec3 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -1063,7 +1063,7 @@ bool hid_ignore(struct hid_device *hdev)
}
if (hdev->type == HID_TYPE_USBMOUSE &&
- hid_match_id(hdev, hid_mouse_ignore_list))
+ hdev->quirks & HID_QUIRK_IGNORE_MOUSE)
return true;
return !!hid_match_id(hdev, hid_ignore_list);
@@ -1267,6 +1267,9 @@ static unsigned long hid_gets_squirk(const struct hid_device *hdev)
if (hid_match_id(hdev, hid_ignore_list))
quirks |= HID_QUIRK_IGNORE;
+ if (hid_match_id(hdev, hid_mouse_ignore_list))
+ quirks |= HID_QUIRK_IGNORE_MOUSE;
+
if (hid_match_id(hdev, hid_have_special_driver))
quirks |= HID_QUIRK_HAVE_SPECIAL_DRIVER;
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c b/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
index fa51155ebe39..8a8c4a46f927 100644
--- a/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
@@ -82,15 +82,10 @@ static int quicki2c_acpi_get_dsd_property(struct acpi_device *adev, acpi_string
{
acpi_handle handle = acpi_device_handle(adev);
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object obj = { .type = type };
- struct acpi_object_list arg_list = {
- .count = 1,
- .pointer = &obj,
- };
union acpi_object *ret_obj;
acpi_status status;
- status = acpi_evaluate_object(handle, dsd_method_name, &arg_list, &buffer);
+ status = acpi_evaluate_object(handle, dsd_method_name, NULL, &buffer);
if (ACPI_FAILURE(status)) {
acpi_handle_err(handle,
"Can't evaluate %s method: %d\n", dsd_method_name, status);
diff --git a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c
index 4fc78b5a04b5..c105df7f6c87 100644
--- a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c
+++ b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c
@@ -1121,7 +1121,7 @@ EXPORT_SYMBOL_NS_GPL(thc_port_select, "INTEL_THC");
static u8 thc_get_spi_freq_div_val(struct thc_device *dev, u32 spi_freq_val)
{
- int frequency[] = {
+ static const int frequency[] = {
THC_SPI_FREQUENCY_7M,
THC_SPI_FREQUENCY_15M,
THC_SPI_FREQUENCY_17M,
@@ -1130,7 +1130,7 @@ static u8 thc_get_spi_freq_div_val(struct thc_device *dev, u32 spi_freq_val)
THC_SPI_FREQUENCY_31M,
THC_SPI_FREQUENCY_41M,
};
- u8 frequency_div[] = {
+ static const u8 frequency_div[] = {
THC_SPI_FRQ_DIV_2,
THC_SPI_FRQ_DIV_1,
THC_SPI_FRQ_DIV_7,
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 7d9297fad90e..d4cbecc668ec 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -984,12 +984,11 @@ static int usbhid_parse(struct hid_device *hid)
struct usb_host_interface *interface = intf->cur_altsetting;
struct usb_device *dev = interface_to_usbdev (intf);
struct hid_descriptor *hdesc;
+ struct hid_class_descriptor *hcdesc;
u32 quirks = 0;
unsigned int rsize = 0;
char *rdesc;
- int ret, n;
- int num_descriptors;
- size_t offset = offsetof(struct hid_descriptor, desc);
+ int ret;
quirks = hid_lookup_quirk(hid);
@@ -1011,20 +1010,19 @@ static int usbhid_parse(struct hid_device *hid)
return -ENODEV;
}
- if (hdesc->bLength < sizeof(struct hid_descriptor)) {
- dbg_hid("hid descriptor is too short\n");
+ if (!hdesc->bNumDescriptors ||
+ hdesc->bLength != sizeof(*hdesc) +
+ (hdesc->bNumDescriptors - 1) * sizeof(*hcdesc)) {
+ dbg_hid("hid descriptor invalid, bLen=%hhu bNum=%hhu\n",
+ hdesc->bLength, hdesc->bNumDescriptors);
return -EINVAL;
}
hid->version = le16_to_cpu(hdesc->bcdHID);
hid->country = hdesc->bCountryCode;
- num_descriptors = min_t(int, hdesc->bNumDescriptors,
- (hdesc->bLength - offset) / sizeof(struct hid_class_descriptor));
-
- for (n = 0; n < num_descriptors; n++)
- if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT)
- rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength);
+ if (hdesc->rpt_desc.bDescriptorType == HID_DT_REPORT)
+ rsize = le16_to_cpu(hdesc->rpt_desc.wDescriptorLength);
if (!rsize || rsize > HID_MAX_DESCRIPTOR_SIZE) {
dbg_hid("weird size of report descriptor (%u)\n", rsize);
@@ -1052,6 +1050,11 @@ static int usbhid_parse(struct hid_device *hid)
goto err;
}
+ if (hdesc->bNumDescriptors > 1)
+ hid_warn(intf,
+ "%u unsupported optional hid class descriptors\n",
+ (int)(hdesc->bNumDescriptors - 1));
+
hid->quirks |= quirks;
return 0;
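
The length check is now exact rather than a lower bound: struct hid_descriptor embeds the mandatory report-class descriptor as rpt_desc (the hid-hyperv hunk above follows the same layout change), and each packed hid_class_descriptor is 3 bytes, so the accepted size is

	bLength == sizeof(struct hid_descriptor)
		   + (bNumDescriptors - 1) * sizeof(struct hid_class_descriptor)
		== 9 + (N - 1) * 3	/* N == 1, the common case: 9 bytes */

which matches the 9-byte HID descriptor supplied by devices with a single report descriptor.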
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index 6c1416167bd2..1cd188b73b74 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -5,17 +5,18 @@ menu "Microsoft Hyper-V guest support"
config HYPERV
tristate "Microsoft Hyper-V client drivers"
depends on (X86 && X86_LOCAL_APIC && HYPERVISOR_GUEST) \
- || (ACPI && ARM64 && !CPU_BIG_ENDIAN)
+ || (ARM64 && !CPU_BIG_ENDIAN)
select PARAVIRT
select X86_HV_CALLBACK_VECTOR if X86
select OF_EARLY_FLATTREE if OF
+ select SYSFB if !HYPERV_VTL_MODE
help
Select this option to run Linux as a Hyper-V client operating
system.
config HYPERV_VTL_MODE
bool "Enable Linux to boot in VTL context"
- depends on X86_64 && HYPERV
+ depends on (X86_64 || ARM64) && HYPERV
depends on SMP
default n
help
@@ -31,7 +32,7 @@ config HYPERV_VTL_MODE
Select this option to build a Linux kernel to run at a VTL other than
the normal VTL0, which currently is only VTL2. This option
- initializes the x86 platform for VTL2, and adds the ability to boot
+ initializes the kernel to run in VTL2, and adds the ability to boot
secondary CPUs directly into 64-bit context as required for VTLs other
than 0. A kernel built with this option must run at VTL2, and will
not run as a normal guest.
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 8351360bba16..be490c598785 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -207,10 +207,19 @@ int vmbus_connect(void)
mutex_init(&vmbus_connection.channel_mutex);
/*
+ * The following Hyper-V interrupt and monitor pages can be used by
+ * UIO for mapping to user-space, so they should always be allocated on
+ * system page boundaries. The system page size must be >= the Hyper-V
+ * page size.
+ */
+ BUILD_BUG_ON(PAGE_SIZE < HV_HYP_PAGE_SIZE);
+
+ /*
* Setup the vmbus event connection for channel interrupt
* abstraction stuff
*/
- vmbus_connection.int_page = hv_alloc_hyperv_zeroed_page();
+ vmbus_connection.int_page =
+ (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
if (vmbus_connection.int_page == NULL) {
ret = -ENOMEM;
goto cleanup;
@@ -225,8 +234,8 @@ int vmbus_connect(void)
* Setup the monitor notification facility. The 1st page for
* parent->child and the 2nd page for child->parent
*/
- vmbus_connection.monitor_pages[0] = hv_alloc_hyperv_page();
- vmbus_connection.monitor_pages[1] = hv_alloc_hyperv_page();
+ vmbus_connection.monitor_pages[0] = (void *)__get_free_page(GFP_KERNEL);
+ vmbus_connection.monitor_pages[1] = (void *)__get_free_page(GFP_KERNEL);
if ((vmbus_connection.monitor_pages[0] == NULL) ||
(vmbus_connection.monitor_pages[1] == NULL)) {
ret = -ENOMEM;
@@ -342,21 +351,23 @@ void vmbus_disconnect(void)
destroy_workqueue(vmbus_connection.work_queue);
if (vmbus_connection.int_page) {
- hv_free_hyperv_page(vmbus_connection.int_page);
+ free_page((unsigned long)vmbus_connection.int_page);
vmbus_connection.int_page = NULL;
}
if (vmbus_connection.monitor_pages[0]) {
if (!set_memory_encrypted(
(unsigned long)vmbus_connection.monitor_pages[0], 1))
- hv_free_hyperv_page(vmbus_connection.monitor_pages[0]);
+ free_page((unsigned long)
+ vmbus_connection.monitor_pages[0]);
vmbus_connection.monitor_pages[0] = NULL;
}
if (vmbus_connection.monitor_pages[1]) {
if (!set_memory_encrypted(
(unsigned long)vmbus_connection.monitor_pages[1], 1))
- hv_free_hyperv_page(vmbus_connection.monitor_pages[1]);
+ free_page((unsigned long)
+ vmbus_connection.monitor_pages[1]);
vmbus_connection.monitor_pages[1] = NULL;
}
}
diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c
index 59792e00cecf..49898d10faff 100644
--- a/drivers/hv/hv_common.c
+++ b/drivers/hv/hv_common.c
@@ -105,45 +105,6 @@ void __init hv_common_free(void)
hv_synic_eventring_tail = NULL;
}
-/*
- * Functions for allocating and freeing memory with size and
- * alignment HV_HYP_PAGE_SIZE. These functions are needed because
- * the guest page size may not be the same as the Hyper-V page
- * size. We depend upon kmalloc() aligning power-of-two size
- * allocations to the allocation size boundary, so that the
- * allocated memory appears to Hyper-V as a page of the size
- * it expects.
- */
-
-void *hv_alloc_hyperv_page(void)
-{
- BUILD_BUG_ON(PAGE_SIZE < HV_HYP_PAGE_SIZE);
-
- if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
- return (void *)__get_free_page(GFP_KERNEL);
- else
- return kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
-}
-EXPORT_SYMBOL_GPL(hv_alloc_hyperv_page);
-
-void *hv_alloc_hyperv_zeroed_page(void)
-{
- if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
- return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
- else
- return kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
-}
-EXPORT_SYMBOL_GPL(hv_alloc_hyperv_zeroed_page);
-
-void hv_free_hyperv_page(void *addr)
-{
- if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
- free_page((unsigned long)addr);
- else
- kfree(addr);
-}
-EXPORT_SYMBOL_GPL(hv_free_hyperv_page);
-
static void *hv_panic_page;
/*
@@ -272,7 +233,7 @@ static void hv_kmsg_dump_unregister(void)
atomic_notifier_chain_unregister(&panic_notifier_list,
&hyperv_panic_report_block);
- hv_free_hyperv_page(hv_panic_page);
+ kfree(hv_panic_page);
hv_panic_page = NULL;
}
@@ -280,7 +241,7 @@ static void hv_kmsg_dump_register(void)
{
int ret;
- hv_panic_page = hv_alloc_hyperv_zeroed_page();
+ hv_panic_page = kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
if (!hv_panic_page) {
pr_err("Hyper-V: panic message page memory allocation failed\n");
return;
@@ -289,7 +250,7 @@ static void hv_kmsg_dump_register(void)
ret = kmsg_dump_register(&hv_kmsg_dumper);
if (ret) {
pr_err("Hyper-V: kmsg dump register error 0x%x\n", ret);
- hv_free_hyperv_page(hv_panic_page);
+ kfree(hv_panic_page);
hv_panic_page = NULL;
}
}
@@ -317,6 +278,37 @@ void __init hv_get_partition_id(void)
pr_err("Hyper-V: failed to get partition ID: %#x\n",
hv_result(status));
}
+#if IS_ENABLED(CONFIG_HYPERV_VTL_MODE)
+u8 __init get_vtl(void)
+{
+ u64 control = HV_HYPERCALL_REP_COMP_1 | HVCALL_GET_VP_REGISTERS;
+ struct hv_input_get_vp_registers *input;
+ struct hv_output_get_vp_registers *output;
+ unsigned long flags;
+ u64 ret;
+
+ local_irq_save(flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ output = *this_cpu_ptr(hyperv_pcpu_output_arg);
+
+ memset(input, 0, struct_size(input, names, 1));
+ input->partition_id = HV_PARTITION_ID_SELF;
+ input->vp_index = HV_VP_INDEX_SELF;
+ input->input_vtl.as_uint8 = 0;
+ input->names[0] = HV_REGISTER_VSM_VP_STATUS;
+
+ ret = hv_do_hypercall(control, input, output);
+ if (hv_result_success(ret)) {
+ ret = output->values[0].reg8 & HV_VTL_MASK;
+ } else {
+ pr_err("Failed to get VTL(error: %lld) exiting...\n", ret);
+ BUG();
+ }
+
+ local_irq_restore(flags);
+ return ret;
+}
+#endif
int __init hv_common_init(void)
{
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index d74adb5bba44..33b524b4eb5e 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -45,7 +45,8 @@ struct vmbus_dynid {
struct hv_vmbus_device_id id;
};
-static struct device *hv_dev;
+/* VMBus Root Device */
+static struct device *vmbus_root_device;
static int hyperv_cpuhp_online;
@@ -80,9 +81,15 @@ static struct resource *fb_mmio;
static struct resource *hyperv_mmio;
static DEFINE_MUTEX(hyperv_mmio_lock);
+struct device *hv_get_vmbus_root_device(void)
+{
+ return vmbus_root_device;
+}
+EXPORT_SYMBOL_GPL(hv_get_vmbus_root_device);
+
static int vmbus_exists(void)
{
- if (hv_dev == NULL)
+ if (vmbus_root_device == NULL)
return -ENODEV;
return 0;
@@ -707,7 +714,30 @@ static const struct hv_vmbus_device_id *hv_vmbus_get_id(const struct hv_driver *
return id;
}
-/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
+/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices
+ *
+ * This function can race with vmbus_device_register(). This function is
+ * typically running on a user thread in response to writing to the "new_id"
+ * sysfs entry for a driver. vmbus_device_register() is running on a
+ * workqueue thread in response to the Hyper-V host offering a device to the
+ * guest. This function calls driver_attach(), which looks for an existing
+ * device matching the new id, and attaches the driver to which the new id
+ * has been assigned. vmbus_device_register() calls device_register(), which
+ * looks for a driver that matches the device being registered. If both
+ * operations are running simultaneously, the device driver probe function runs
+ * on whichever thread establishes the linkage between the driver and device.
+ *
+ * In most cases, it doesn't matter which thread runs the driver probe
+ * function. But if vmbus_device_register() does not find a matching driver,
+ * it proceeds to create the "channels" subdirectory and numbered per-channel
+ * subdirectory in sysfs. While that multi-step creation is in progress, this
+ * function could run the driver probe function. If the probe function checks
+ * for, or operates on, entries in the "channels" subdirectory, including by
+ * calling hv_create_ring_sysfs(), the operation may or may not succeed
+ * depending on the race. The race can't create a kernel failure in VMBus
+ * or device subsystem code, but probe functions in VMBus drivers doing such
+ * operations must be prepared for the failure case.
+ */
static int vmbus_add_dynid(struct hv_driver *drv, guid_t *guid)
{
struct vmbus_dynid *dynid;
@@ -861,7 +891,7 @@ static int vmbus_dma_configure(struct device *child_device)
* On x86/x64 coherence is assumed and these calls have no effect.
*/
hv_setup_dma_ops(child_device,
- device_get_dma_attr(hv_dev) == DEV_DMA_COHERENT);
+ device_get_dma_attr(vmbus_root_device) == DEV_DMA_COHERENT);
return 0;
}
@@ -1921,7 +1951,8 @@ static const struct kobj_type vmbus_chan_ktype = {
* ring for userspace to use.
* Note: Race conditions can happen with userspace and it is not encouraged to create new
* use-cases for this. This was added to maintain backward compatibility, while solving
- * one of the race conditions in uio_hv_generic while creating sysfs.
+ * one of the race conditions in uio_hv_generic while creating sysfs. See comments with
+ * vmbus_add_dynid() and vmbus_device_register().
*
* Returns 0 on success or error code on failure.
*/
@@ -2037,7 +2068,7 @@ int vmbus_device_register(struct hv_device *child_device_obj)
&child_device_obj->channel->offermsg.offer.if_instance);
child_device_obj->device.bus = &hv_bus;
- child_device_obj->device.parent = hv_dev;
+ child_device_obj->device.parent = vmbus_root_device;
child_device_obj->device.release = vmbus_device_release;
child_device_obj->device.dma_parms = &child_device_obj->dma_parms;
@@ -2055,6 +2086,20 @@ int vmbus_device_register(struct hv_device *child_device_obj)
return ret;
}
+ /*
+ * If device_register() found a driver to assign to the device, the
+ * driver's probe function has already run at this point. If that
+ * probe function accesses or operates on the "channels" subdirectory
+ * in sysfs, those operations will have failed because the "channels"
+ * subdirectory doesn't exist until the code below runs. Or if the
+ * probe function creates a /dev entry, a user space program could
+ * find and open the /dev entry, and then create a race by accessing
+ * the "channels" subdirectory while the creation steps are in progress
+ * here. The race can't result in a kernel failure, but the user space
+ * program may get an error in accessing "channels" or its
+ * subdirectories. See also comments with vmbus_add_dynid() about a
+ * related race condition.
+ */
child_device_obj->channels_kset = kset_create_and_add("channels",
NULL, kobj);
if (!child_device_obj->channels_kset) {
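From user space, the same window is visible as a transiently missing "channels" directory after the /dev node appears. A small sketch of a bounded retry loop (path and timing are illustrative):

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

static int open_channels_entry(const char *path, int tries)
{
	int fd;

	while (tries-- > 0) {
		fd = open(path, O_RDONLY);
		if (fd >= 0 || errno != ENOENT)
			return fd;
		usleep(10000);	/* creation still in progress; back off */
	}
	return -1;
}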
@@ -2412,7 +2457,7 @@ static int vmbus_acpi_add(struct platform_device *pdev)
struct acpi_device *ancestor;
struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
- hv_dev = &device->dev;
+ vmbus_root_device = &device->dev;
/*
* Older versions of Hyper-V for ARM64 fail to include the _CCA
@@ -2465,6 +2510,31 @@ static int vmbus_acpi_add(struct platform_device *pdev)
}
#endif
+static int vmbus_set_irq(struct platform_device *pdev)
+{
+ struct irq_data *data;
+ int irq;
+ irq_hw_number_t hwirq;
+
+ irq = platform_get_irq(pdev, 0);
+ /* platform_get_irq() never returns 0; only a valid virq or a negative error. */

+ if (irq < 0)
+ return irq;
+
+ data = irq_get_irq_data(irq);
+ if (!data) {
+ pr_err("No interrupt data for VMBus virq %d\n", irq);
+ return -ENODEV;
+ }
+ hwirq = irqd_to_hwirq(data);
+
+ vmbus_irq = irq;
+ vmbus_interrupt = hwirq;
+ pr_debug("VMBus virq %d, hwirq %d\n", vmbus_irq, vmbus_interrupt);
+
+ return 0;
+}
+
static int vmbus_device_add(struct platform_device *pdev)
{
struct resource **cur_res = &hyperv_mmio;
@@ -2473,12 +2543,17 @@ static int vmbus_device_add(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
int ret;
- hv_dev = &pdev->dev;
+ vmbus_root_device = &pdev->dev;
ret = of_range_parser_init(&parser, np);
if (ret)
return ret;
+ if (!__is_defined(HYPERVISOR_CALLBACK_VECTOR))
+ ret = vmbus_set_irq(pdev);
+ if (ret)
+ return ret;
+
for_each_of_range(&parser, &range) {
struct resource *res;
@@ -2786,7 +2861,7 @@ static int __init hv_acpi_init(void)
if (ret)
return ret;
- if (!hv_dev) {
+ if (!vmbus_root_device) {
ret = -ENODEV;
goto cleanup;
}
@@ -2817,7 +2892,7 @@ static int __init hv_acpi_init(void)
cleanup:
platform_driver_unregister(&vmbus_platform_driver);
- hv_dev = NULL;
+ vmbus_root_device = NULL;
return ret;
}
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 5fd93aad2d6d..1b1d64493909 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -335,6 +335,26 @@ config SENSORS_K10TEMP
This driver can also be built as a module. If so, the module
will be called k10temp.
+config SENSORS_KBATT
+ tristate "KEBA battery controller support"
+ depends on KEBA_CP500
+ help
+ This driver supports the battery monitoring controller found in
+ KEBA system FPGA devices.
+
+ This driver can also be built as a module. If so, the module
+ will be called kbatt.
+
+config SENSORS_KFAN
+ tristate "KEBA fan controller support"
+ depends on KEBA_CP500
+ help
+ This driver supports the fan controller found in KEBA system
+ FPGA devices.
+
+ This driver can also be built as a module. If so, the module
+ will be called kfan.
+
config SENSORS_FAM15H_POWER
tristate "AMD Family 15h processor power"
depends on X86 && PCI && CPU_SUP_AMD
@@ -1308,6 +1328,15 @@ config SENSORS_MAX31790
This driver can also be built as a module. If so, the module
will be called max31790.
+config SENSORS_MAX77705
+ tristate "MAX77705 current and voltage sensor"
+ depends on MFD_MAX77705
+ help
+ If you say yes here you get support for MAX77705 sensors connected via I2C.
+
+ This driver can also be built as a module. If so, the module
+ will be called max77705-hwmon.
+
config SENSORS_MC34VR500
tristate "NXP MC34VR500 hardware monitoring driver"
depends on I2C
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index e3468d024ff3..48e5866c0c9a 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -110,6 +110,8 @@ obj-$(CONFIG_SENSORS_IT87) += it87.o
obj-$(CONFIG_SENSORS_JC42) += jc42.o
obj-$(CONFIG_SENSORS_K8TEMP) += k8temp.o
obj-$(CONFIG_SENSORS_K10TEMP) += k10temp.o
+obj-$(CONFIG_SENSORS_KBATT) += kbatt.o
+obj-$(CONFIG_SENSORS_KFAN) += kfan.o
obj-$(CONFIG_SENSORS_LAN966X) += lan966x-hwmon.o
obj-$(CONFIG_SENSORS_LENOVO_EC) += lenovo-ec-sensors.o
obj-$(CONFIG_SENSORS_LINEAGE) += lineage-pem.o
@@ -161,6 +163,7 @@ obj-$(CONFIG_SENSORS_MAX6650) += max6650.o
obj-$(CONFIG_SENSORS_MAX6697) += max6697.o
obj-$(CONFIG_SENSORS_MAX31790) += max31790.o
obj-$(CONFIG_MAX31827) += max31827.o
+obj-$(CONFIG_SENSORS_MAX77705) += max77705-hwmon.o
obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o
obj-$(CONFIG_SENSORS_MC34VR500) += mc34vr500.o
obj-$(CONFIG_SENSORS_MCP3021) += mcp3021.o
diff --git a/drivers/hwmon/aht10.c b/drivers/hwmon/aht10.c
index 312ef3e98754..d1c55e2eb479 100644
--- a/drivers/hwmon/aht10.c
+++ b/drivers/hwmon/aht10.c
@@ -94,7 +94,7 @@ struct aht10_data {
unsigned int meas_size;
};
-/**
+/*
* aht10_init() - Initialize an AHT10/AHT20 chip
* @data: the data associated with this AHT10/AHT20 chip
* Return: 0 if successful, 1 if not
@@ -124,7 +124,7 @@ static int aht10_init(struct aht10_data *data)
return 0;
}
-/**
+/*
* aht10_polltime_expired() - check if the minimum poll interval has
* expired
* @data: the data containing the time to compare
@@ -140,7 +140,7 @@ static int aht10_polltime_expired(struct aht10_data *data)
DECLARE_CRC8_TABLE(crc8_table);
-/**
+/*
* crc8_check() - check crc of the sensor's measurements
* @raw_data: data frame received from sensor(including crc as the last byte)
* @count: size of the data frame
@@ -155,7 +155,7 @@ static int crc8_check(u8 *raw_data, int count)
return crc8(crc8_table, raw_data, count, CRC8_INIT_VALUE);
}
-/**
+/*
* aht10_read_values() - read and parse the raw data from the AHT10/AHT20
* @data: the struct aht10_data to use for the lock
* Return: 0 if successful, 1 if not
@@ -214,7 +214,7 @@ static int aht10_read_values(struct aht10_data *data)
return 0;
}
-/**
+/*
* aht10_interval_write() - store the given minimum poll interval.
* Return: 0 on success, -EINVAL if a value lower than the
* AHT10_MIN_POLL_INTERVAL is given
@@ -226,7 +226,7 @@ static ssize_t aht10_interval_write(struct aht10_data *data,
return 0;
}
-/**
+/*
* aht10_interval_read() - read the minimum poll interval
* in milliseconds
*/
@@ -237,7 +237,7 @@ static ssize_t aht10_interval_read(struct aht10_data *data,
return 0;
}
-/**
+/*
* aht10_temperature1_read() - read the temperature in millidegrees
*/
static int aht10_temperature1_read(struct aht10_data *data, long *val)
@@ -252,7 +252,7 @@ static int aht10_temperature1_read(struct aht10_data *data, long *val)
return 0;
}
-/**
+/*
* aht10_humidity1_read() - read the relative humidity in millipercent
*/
static int aht10_humidity1_read(struct aht10_data *data, long *val)
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
index 1e3c6acd8974..13a789cc85d2 100644
--- a/drivers/hwmon/amc6821.c
+++ b/drivers/hwmon/amc6821.c
@@ -23,9 +23,12 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_platform.h>
+#include <linux/pwm.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+#include <dt-bindings/pwm/pwm.h>
+
/*
* Addresses to scan.
*/
@@ -37,7 +40,7 @@ static const unsigned short normal_i2c[] = {0x18, 0x19, 0x1a, 0x2c, 0x2d, 0x2e,
* Insmod parameters
*/
-static int pwminv; /*Inverted PWM output. */
+static int pwminv = -1; /* Inverted PWM output. */
module_param(pwminv, int, 0444);
static int init = 1; /*Power-on initialization.*/
@@ -845,9 +848,43 @@ static int amc6821_detect(struct i2c_client *client, struct i2c_board_info *info
return 0;
}
-static int amc6821_init_client(struct amc6821_data *data)
+static enum pwm_polarity amc6821_pwm_polarity(struct i2c_client *client)
+{
+ enum pwm_polarity polarity = PWM_POLARITY_NORMAL;
+ struct of_phandle_args args;
+ struct device_node *fan_np;
+
+ /*
+ * For backward compatibility, the pwminv module parameter always
+ * takes precedence over any other device description.
+ */
+ if (pwminv == 0)
+ return PWM_POLARITY_NORMAL;
+ if (pwminv > 0)
+ return PWM_POLARITY_INVERSED;
+
+ fan_np = of_get_child_by_name(client->dev.of_node, "fan");
+ if (!fan_np)
+ return PWM_POLARITY_NORMAL;
+
+ if (of_parse_phandle_with_args(fan_np, "pwms", "#pwm-cells", 0, &args))
+ goto out;
+ of_node_put(args.np);
+
+ if (args.args_count != 2)
+ goto out;
+
+ if (args.args[1] & PWM_POLARITY_INVERTED)
+ polarity = PWM_POLARITY_INVERSED;
+out:
+ of_node_put(fan_np);
+ return polarity;
+}
+
+static int amc6821_init_client(struct i2c_client *client, struct amc6821_data *data)
{
struct regmap *regmap = data->regmap;
+ u32 regval;
int err;
if (init) {
@@ -864,11 +901,14 @@ static int amc6821_init_client(struct amc6821_data *data)
if (err)
return err;
+ regval = AMC6821_CONF1_START;
+ if (amc6821_pwm_polarity(client) == PWM_POLARITY_INVERSED)
+ regval |= AMC6821_CONF1_PWMINV;
+
err = regmap_update_bits(regmap, AMC6821_REG_CONF1,
AMC6821_CONF1_THERMOVIE | AMC6821_CONF1_FANIE |
AMC6821_CONF1_START | AMC6821_CONF1_PWMINV,
- AMC6821_CONF1_START |
- (pwminv ? AMC6821_CONF1_PWMINV : 0));
+ regval);
if (err)
return err;
}
@@ -916,7 +956,7 @@ static int amc6821_probe(struct i2c_client *client)
"Failed to initialize regmap\n");
data->regmap = regmap;
- err = amc6821_init_client(data);
+ err = amc6821_init_client(client, data);
if (err)
return err;
diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c
index 006ced5ab6e6..e0a95197c71b 100644
--- a/drivers/hwmon/asus-ec-sensors.c
+++ b/drivers/hwmon/asus-ec-sensors.c
@@ -169,7 +169,11 @@ enum board_family {
family_intel_600_series
};
-/* All the known sensors for ASUS EC controllers */
+/*
+ * All the known sensors for ASUS EC controllers. These arrays have to be sorted
+ * by the full ((bank << 8) + index) register index (see asus_ec_block_read() as
+ * to why).
+ */
static const struct ec_sensor_info sensors_family_amd_400[] = {
[ec_sensor_temp_chipset] =
EC_SENSOR("Chipset", hwmon_temp, 1, 0x00, 0x3a),
@@ -183,10 +187,10 @@ static const struct ec_sensor_info sensors_family_amd_400[] = {
EC_SENSOR("VRM", hwmon_temp, 1, 0x00, 0x3e),
[ec_sensor_in_cpu_core] =
EC_SENSOR("CPU Core", hwmon_in, 2, 0x00, 0xa2),
- [ec_sensor_fan_cpu_opt] =
- EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xbc),
[ec_sensor_fan_vrm_hs] =
EC_SENSOR("VRM HS", hwmon_fan, 2, 0x00, 0xb2),
+ [ec_sensor_fan_cpu_opt] =
+ EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xbc),
[ec_sensor_fan_chipset] =
/* no chipset fans in this generation */
EC_SENSOR("Chipset", hwmon_fan, 0, 0x00, 0x00),
@@ -194,10 +198,10 @@ static const struct ec_sensor_info sensors_family_amd_400[] = {
EC_SENSOR("Water_Flow", hwmon_fan, 2, 0x00, 0xb4),
[ec_sensor_curr_cpu] =
EC_SENSOR("CPU", hwmon_curr, 1, 0x00, 0xf4),
- [ec_sensor_temp_water_in] =
- EC_SENSOR("Water_In", hwmon_temp, 1, 0x01, 0x0d),
[ec_sensor_temp_water_out] =
EC_SENSOR("Water_Out", hwmon_temp, 1, 0x01, 0x0b),
+ [ec_sensor_temp_water_in] =
+ EC_SENSOR("Water_In", hwmon_temp, 1, 0x01, 0x0d),
};
static const struct ec_sensor_info sensors_family_amd_500[] = {
@@ -239,19 +243,20 @@ static const struct ec_sensor_info sensors_family_amd_500[] = {
static const struct ec_sensor_info sensors_family_amd_600[] = {
[ec_sensor_temp_cpu] = EC_SENSOR("CPU", hwmon_temp, 1, 0x00, 0x30),
- [ec_sensor_temp_cpu_package] = EC_SENSOR("CPU Package", hwmon_temp, 1, 0x00, 0x31),
+ [ec_sensor_temp_cpu_package] =
+ EC_SENSOR("CPU Package", hwmon_temp, 1, 0x00, 0x31),
[ec_sensor_temp_mb] =
EC_SENSOR("Motherboard", hwmon_temp, 1, 0x00, 0x32),
[ec_sensor_temp_vrm] =
EC_SENSOR("VRM", hwmon_temp, 1, 0x00, 0x33),
[ec_sensor_temp_t_sensor] =
EC_SENSOR("T_Sensor", hwmon_temp, 1, 0x00, 0x36),
+ [ec_sensor_fan_cpu_opt] =
+ EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xb0),
[ec_sensor_temp_water_in] =
EC_SENSOR("Water_In", hwmon_temp, 1, 0x01, 0x00),
[ec_sensor_temp_water_out] =
EC_SENSOR("Water_Out", hwmon_temp, 1, 0x01, 0x01),
- [ec_sensor_fan_cpu_opt] =
- EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xb0),
};
static const struct ec_sensor_info sensors_family_intel_300[] = {
@@ -278,6 +283,14 @@ static const struct ec_sensor_info sensors_family_intel_600[] = {
[ec_sensor_temp_t_sensor] =
EC_SENSOR("T_Sensor", hwmon_temp, 1, 0x00, 0x3d),
[ec_sensor_temp_vrm] = EC_SENSOR("VRM", hwmon_temp, 1, 0x00, 0x3e),
+ [ec_sensor_fan_water_flow] =
+ EC_SENSOR("Water_Flow", hwmon_fan, 2, 0x00, 0xbe),
+ [ec_sensor_temp_water_in] =
+ EC_SENSOR("Water_In", hwmon_temp, 1, 0x01, 0x00),
+ [ec_sensor_temp_water_out] =
+ EC_SENSOR("Water_Out", hwmon_temp, 1, 0x01, 0x01),
+ [ec_sensor_temp_water_block_in] =
+ EC_SENSOR("Water_Block_In", hwmon_temp, 1, 0x01, 0x02),
};
/* Shortcuts for common combinations */
@@ -300,6 +313,15 @@ struct ec_board_info {
enum board_family family;
};
+static const struct ec_board_info board_info_maximus_vi_hero = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW,
+ .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
+ .family = family_intel_300_series,
+};
+
static const struct ec_board_info board_info_prime_x470_pro = {
.sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
@@ -402,6 +424,13 @@ static const struct ec_board_info board_info_maximus_xi_hero = {
.family = family_intel_300_series,
};
+static const struct ec_board_info board_info_maximus_z690_formula = {
+ .sensors = SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+ SENSOR_SET_TEMP_WATER | SENSOR_FAN_WATER_FLOW,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_RMTW_ASMX,
+ .family = family_intel_600_series,
+};
+
static const struct ec_board_info board_info_crosshair_viii_impact = {
.sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
@@ -507,6 +536,8 @@ static const struct ec_board_info board_info_tuf_gaming_x670e_plus = {
}
static const struct dmi_system_id dmi_table[] = {
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("MAXIMUS VI HERO",
+ &board_info_maximus_vi_hero),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X470-PRO",
&board_info_prime_x470_pro),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X570-PRO",
@@ -537,6 +568,8 @@ static const struct dmi_system_id dmi_table[] = {
&board_info_maximus_xi_hero),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG MAXIMUS XI HERO (WI-FI)",
&board_info_maximus_xi_hero),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG MAXIMUS Z690 FORMULA",
+ &board_info_maximus_z690_formula),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII IMPACT",
&board_info_crosshair_viii_impact),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B550-E GAMING",
@@ -933,6 +966,10 @@ static int asus_ec_hwmon_read_string(struct device *dev,
{
struct ec_sensors_data *state = dev_get_drvdata(dev);
int sensor_index = find_ec_sensor_index(state, type, channel);
+
+ if (sensor_index < 0)
+ return sensor_index;
+
*str = get_sensor_info(state, sensor_index)->label;
return 0;
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 79e5606e6d2f..1e2c8e284001 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -1274,6 +1274,13 @@ static const struct dmi_system_id i8k_dmi_table[] __initconst = {
},
},
{
+ .ident = "Dell OptiPlex 7050",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "OptiPlex 7050"),
+ },
+ },
+ {
.ident = "Dell Precision",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index b779240328d5..516c34bb61c9 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -20,6 +20,9 @@
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
#include <linux/thermal.h>
struct gpio_fan_speed {
@@ -42,6 +45,7 @@ struct gpio_fan_data {
bool pwm_enable;
struct gpio_desc *alarm_gpio;
struct work_struct alarm_work;
+ struct regulator *supply;
};
/*
@@ -125,13 +129,32 @@ static int __get_fan_ctrl(struct gpio_fan_data *fan_data)
}
/* Must be called with fan_data->lock held, except during initialization. */
-static void set_fan_speed(struct gpio_fan_data *fan_data, int speed_index)
+static int set_fan_speed(struct gpio_fan_data *fan_data, int speed_index)
{
if (fan_data->speed_index == speed_index)
- return;
+ return 0;
+
+ if (fan_data->speed_index == 0 && speed_index > 0) {
+ int ret;
+
+ ret = pm_runtime_resume_and_get(fan_data->dev);
+ if (ret < 0)
+ return ret;
+ }
__set_fan_ctrl(fan_data, fan_data->speed[speed_index].ctrl_val);
+
+ if (fan_data->speed_index > 0 && speed_index == 0) {
+ int ret;
+
+ ret = pm_runtime_put_sync(fan_data->dev);
+ if (ret < 0)
+ return ret;
+ }
+
fan_data->speed_index = speed_index;
+
+ return 0;
}
static int get_fan_speed_index(struct gpio_fan_data *fan_data)
@@ -176,7 +199,7 @@ static ssize_t pwm1_store(struct device *dev, struct device_attribute *attr,
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
unsigned long pwm;
int speed_index;
- int ret = count;
+ int ret;
if (kstrtoul(buf, 10, &pwm) || pwm > 255)
return -EINVAL;
@@ -189,12 +212,12 @@ static ssize_t pwm1_store(struct device *dev, struct device_attribute *attr,
}
speed_index = DIV_ROUND_UP(pwm * (fan_data->num_speed - 1), 255);
- set_fan_speed(fan_data, speed_index);
+ ret = set_fan_speed(fan_data, speed_index);
exit_unlock:
mutex_unlock(&fan_data->lock);
- return ret;
+ return ret ? ret : count;
}
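As a worked instance of the DIV_ROUND_UP() mapping above (values illustrative): with num_speed = 4 and a written pwm of 128, speed_index = DIV_ROUND_UP(128 * 3, 255) = DIV_ROUND_UP(384, 255) = 2, i.e. the third of four speed steps. A standalone sketch:

#include <linux/math.h>

/* Illustrative: map a 0..255 pwm value onto num_speed discrete steps. */
static int example_pwm_to_speed_index(unsigned long pwm, int num_speed)
{
	return DIV_ROUND_UP(pwm * (num_speed - 1), 255);
}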
static ssize_t pwm1_enable_show(struct device *dev,
@@ -211,6 +234,7 @@ static ssize_t pwm1_enable_store(struct device *dev,
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
unsigned long val;
+ int ret = 0;
if (kstrtoul(buf, 10, &val) || val > 1)
return -EINVAL;
@@ -224,11 +248,11 @@ static ssize_t pwm1_enable_store(struct device *dev,
/* Disable manual control mode: set fan at full speed. */
if (val == 0)
- set_fan_speed(fan_data, fan_data->num_speed - 1);
+ ret = set_fan_speed(fan_data, fan_data->num_speed - 1);
mutex_unlock(&fan_data->lock);
- return count;
+ return ret ? ret : count;
}
static ssize_t pwm1_mode_show(struct device *dev,
@@ -279,7 +303,7 @@ static ssize_t set_rpm(struct device *dev, struct device_attribute *attr,
goto exit_unlock;
}
- set_fan_speed(fan_data, rpm_to_speed_index(fan_data, rpm));
+ ret = set_fan_speed(fan_data, rpm_to_speed_index(fan_data, rpm));
exit_unlock:
mutex_unlock(&fan_data->lock);
@@ -386,6 +410,7 @@ static int gpio_fan_set_cur_state(struct thermal_cooling_device *cdev,
unsigned long state)
{
struct gpio_fan_data *fan_data = cdev->devdata;
+ int ret;
if (!fan_data)
return -EINVAL;
@@ -395,11 +420,11 @@ static int gpio_fan_set_cur_state(struct thermal_cooling_device *cdev,
mutex_lock(&fan_data->lock);
- set_fan_speed(fan_data, state);
+ ret = set_fan_speed(fan_data, state);
mutex_unlock(&fan_data->lock);
- return 0;
+ return ret;
}
static const struct thermal_cooling_device_ops gpio_fan_cool_ops = {
@@ -499,6 +524,8 @@ static void gpio_fan_stop(void *data)
mutex_lock(&fan_data->lock);
set_fan_speed(data, 0);
mutex_unlock(&fan_data->lock);
+
+ pm_runtime_disable(fan_data->dev);
}
static int gpio_fan_probe(struct platform_device *pdev)
@@ -521,6 +548,11 @@ static int gpio_fan_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fan_data);
mutex_init(&fan_data->lock);
+ fan_data->supply = devm_regulator_get(dev, "fan");
+ if (IS_ERR(fan_data->supply))
+ return dev_err_probe(dev, PTR_ERR(fan_data->supply),
+ "Failed to get fan-supply");
+
/* Configure control GPIOs if available. */
if (fan_data->gpios && fan_data->num_gpios > 0) {
if (!fan_data->speed || fan_data->num_speed <= 1)
@@ -548,6 +580,17 @@ static int gpio_fan_probe(struct platform_device *pdev)
return err;
}
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ /* If the fan is currently running, mark runtime PM as active too */
+ if (fan_data->speed_index > 0) {
+ int ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+ }
+
/* Optional cooling device register for Device tree platforms */
fan_data->cdev = devm_thermal_of_cooling_device_register(dev, np,
"gpio-fan", fan_data, &gpio_fan_cool_ops);
@@ -565,41 +608,69 @@ static void gpio_fan_shutdown(struct platform_device *pdev)
set_fan_speed(fan_data, 0);
}
+static int gpio_fan_runtime_suspend(struct device *dev)
+{
+ struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (fan_data->supply)
+ ret = regulator_disable(fan_data->supply);
+
+ return ret;
+}
+
+static int gpio_fan_runtime_resume(struct device *dev)
+{
+ struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (fan_data->supply)
+ ret = regulator_enable(fan_data->supply);
+
+ return ret;
+}
+
static int gpio_fan_suspend(struct device *dev)
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
+ int ret = 0;
if (fan_data->gpios) {
fan_data->resume_speed = fan_data->speed_index;
mutex_lock(&fan_data->lock);
- set_fan_speed(fan_data, 0);
+ ret = set_fan_speed(fan_data, 0);
mutex_unlock(&fan_data->lock);
}
- return 0;
+ return ret;
}
static int gpio_fan_resume(struct device *dev)
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
+ int ret = 0;
if (fan_data->gpios) {
mutex_lock(&fan_data->lock);
- set_fan_speed(fan_data, fan_data->resume_speed);
+ ret = set_fan_speed(fan_data, fan_data->resume_speed);
mutex_unlock(&fan_data->lock);
}
- return 0;
+ return ret;
}
-static DEFINE_SIMPLE_DEV_PM_OPS(gpio_fan_pm, gpio_fan_suspend, gpio_fan_resume);
+static const struct dev_pm_ops gpio_fan_pm = {
+ RUNTIME_PM_OPS(gpio_fan_runtime_suspend,
+ gpio_fan_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(gpio_fan_suspend, gpio_fan_resume)
+};
static struct platform_driver gpio_fan_driver = {
.probe = gpio_fan_probe,
.shutdown = gpio_fan_shutdown,
.driver = {
.name = "gpio-fan",
- .pm = pm_sleep_ptr(&gpio_fan_pm),
+ .pm = pm_ptr(&gpio_fan_pm),
.of_match_table = of_gpio_fan_match,
},
};
diff --git a/drivers/hwmon/ina238.c b/drivers/hwmon/ina238.c
index 2d9f12f68d50..a4a41742786b 100644
--- a/drivers/hwmon/ina238.c
+++ b/drivers/hwmon/ina238.c
@@ -21,11 +21,14 @@
#define INA238_CONFIG 0x0
#define INA238_ADC_CONFIG 0x1
#define INA238_SHUNT_CALIBRATION 0x2
+#define SQ52206_SHUNT_TEMPCO 0x3
#define INA238_SHUNT_VOLTAGE 0x4
#define INA238_BUS_VOLTAGE 0x5
#define INA238_DIE_TEMP 0x6
#define INA238_CURRENT 0x7
#define INA238_POWER 0x8
+#define SQ52206_ENERGY 0x9
+#define SQ52206_CHARGE 0xa
#define INA238_DIAG_ALERT 0xb
#define INA238_SHUNT_OVER_VOLTAGE 0xc
#define INA238_SHUNT_UNDER_VOLTAGE 0xd
@@ -33,9 +36,12 @@
#define INA238_BUS_UNDER_VOLTAGE 0xf
#define INA238_TEMP_LIMIT 0x10
#define INA238_POWER_LIMIT 0x11
+#define SQ52206_POWER_PEAK 0x20
#define INA238_DEVICE_ID 0x3f /* not available on INA237 */
#define INA238_CONFIG_ADCRANGE BIT(4)
+#define SQ52206_CONFIG_ADCRANGE_HIGH BIT(4)
+#define SQ52206_CONFIG_ADCRANGE_LOW BIT(3)
#define INA238_DIAG_ALERT_TMPOL BIT(7)
#define INA238_DIAG_ALERT_SHNTOL BIT(6)
@@ -44,12 +50,13 @@
#define INA238_DIAG_ALERT_BUSUL BIT(3)
#define INA238_DIAG_ALERT_POL BIT(2)
-#define INA238_REGISTERS 0x11
+#define INA238_REGISTERS 0x20
#define INA238_RSHUNT_DEFAULT 10000 /* uOhm */
/* Default configuration of device on reset. */
#define INA238_CONFIG_DEFAULT 0
+#define SQ52206_CONFIG_DEFAULT 0x0005
/* 16 sample averaging, 1052us conversion time, continuous mode */
#define INA238_ADC_CONFIG_DEFAULT 0xfb6a
/* Configure alerts to be based on averaged value (SLOWALERT) */
@@ -87,14 +94,19 @@
* shunt = 0x4000 / (819.2 * 10^6) / 0.001 = 20000 uOhms (with 1mA/lsb)
*
* Current (mA) = register value * 20000 / rshunt / 4 * gain
- * Power (W) = 0.2 * register value * 20000 / rshunt / 4 * gain
+ * Power (mW) = 0.2 * register value * 20000 / rshunt / 4 * gain
+ * (Specific for SQ52206)
+ * Power (mW) = 0.24 * register value * 20000 / rshunt / 4 * gain
+ * Energy (mJ) = 16 * 0.24 * register value * 20000 / rshunt / 4 * gain
*/
#define INA238_CALIBRATION_VALUE 16384
#define INA238_FIXED_SHUNT 20000
#define INA238_SHUNT_VOLTAGE_LSB 5 /* 5 uV/lsb */
#define INA238_BUS_VOLTAGE_LSB 3125 /* 3.125 mV/lsb */
-#define INA238_DIE_TEMP_LSB 125 /* 125 mC/lsb */
+#define INA238_DIE_TEMP_LSB 1250000 /* 125.0000 mC/lsb */
+#define SQ52206_BUS_VOLTAGE_LSB 3750 /* 3.75 mV/lsb */
+#define SQ52206_DIE_TEMP_LSB 78125 /* 7.8125 mC/lsb */
static const struct regmap_config ina238_regmap_config = {
.max_register = INA238_REGISTERS,
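To make the scaling comment above concrete, a worked sketch of the current conversion (formula taken verbatim from the comment; the helper name is illustrative): with the default rshunt of 20000 uOhm and gain 4, a register value of 4096 yields 4096 * 20000 / 20000 / 4 * 4 = 4096 mA. Integer rounding may differ slightly from the driver's exact expression.

#include <linux/types.h>

/* Illustrative: Current (mA) = regval * 20000 / rshunt / 4 * gain */
static long example_ina238_current_ma(s16 regval, u32 rshunt, int gain)
{
	return (long)regval * 20000 / rshunt * gain / 4;
}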
@@ -102,7 +114,20 @@ static const struct regmap_config ina238_regmap_config = {
.val_bits = 16,
};
+enum ina238_ids { ina238, ina237, sq52206 };
+
+struct ina238_config {
+ bool has_power_highest; /* chip supports peak power detection */
+ bool has_energy; /* chip supports energy measurement */
+ u8 temp_shift; /* fixed shift for temperature calculation */
+ u32 power_calculate_factor; /* fixed factor for power calculation */
+ u16 config_default; /* power-on default CONFIG state */
+ int bus_voltage_lsb; /* used for voltage calculation, uV/lsb */
+ int temp_lsb; /* used for temperature calculation */
+};
+
struct ina238_data {
+ const struct ina238_config *config;
struct i2c_client *client;
struct mutex config_lock;
struct regmap *regmap;
@@ -110,6 +135,36 @@ struct ina238_data {
int gain;
};
+static const struct ina238_config ina238_config[] = {
+ [ina238] = {
+ .has_energy = false,
+ .has_power_highest = false,
+ .temp_shift = 4,
+ .power_calculate_factor = 20,
+ .config_default = INA238_CONFIG_DEFAULT,
+ .bus_voltage_lsb = INA238_BUS_VOLTAGE_LSB,
+ .temp_lsb = INA238_DIE_TEMP_LSB,
+ },
+ [ina237] = {
+ .has_energy = false,
+ .has_power_highest = false,
+ .temp_shift = 4,
+ .power_calculate_factor = 20,
+ .config_default = INA238_CONFIG_DEFAULT,
+ .bus_voltage_lsb = INA238_BUS_VOLTAGE_LSB,
+ .temp_lsb = INA238_DIE_TEMP_LSB,
+ },
+ [sq52206] = {
+ .has_energy = true,
+ .has_power_highest = true,
+ .temp_shift = 0,
+ .power_calculate_factor = 24,
+ .config_default = SQ52206_CONFIG_DEFAULT,
+ .bus_voltage_lsb = SQ52206_BUS_VOLTAGE_LSB,
+ .temp_lsb = SQ52206_DIE_TEMP_LSB,
+ },
+};
+
static int ina238_read_reg24(const struct i2c_client *client, u8 reg, u32 *val)
{
u8 data[3];
@@ -126,6 +181,24 @@ static int ina238_read_reg24(const struct i2c_client *client, u8 reg, u32 *val)
return 0;
}
+static int ina238_read_reg40(const struct i2c_client *client, u8 reg, u64 *val)
+{
+ u8 data[5];
+ u32 low;
+ int err;
+
+ /* 40-bit register read */
+ err = i2c_smbus_read_i2c_block_data(client, reg, 5, data);
+ if (err < 0)
+ return err;
+ if (err != 5)
+ return -EIO;
+ low = (data[1] << 24) | (data[2] << 16) | (data[3] << 8) | data[4];
+ *val = ((long long)data[0] << 32) | low;
+
+ return 0;
+}
+
static int ina238_read_in(struct device *dev, u32 attr, int channel,
long *val)
{
@@ -197,10 +270,10 @@ static int ina238_read_in(struct device *dev, u32 attr, int channel,
regval = (s16)regval;
if (channel == 0)
/* gain of 1 -> LSB / 4 */
- *val = (regval * INA238_SHUNT_VOLTAGE_LSB) /
- (1000 * (4 - data->gain + 1));
+ *val = (regval * INA238_SHUNT_VOLTAGE_LSB) *
+ data->gain / (1000 * 4);
else
- *val = (regval * INA238_BUS_VOLTAGE_LSB) / 1000;
+ *val = (regval * data->config->bus_voltage_lsb) / 1000;
break;
case hwmon_in_max_alarm:
case hwmon_in_min_alarm:
@@ -225,8 +298,8 @@ static int ina238_write_in(struct device *dev, u32 attr, int channel,
case 0:
/* signed value, clamp to max range +/-163 mV */
regval = clamp_val(val, -163, 163);
- regval = (regval * 1000 * (4 - data->gain + 1)) /
- INA238_SHUNT_VOLTAGE_LSB;
+ regval = (regval * 1000 * 4) /
+ (INA238_SHUNT_VOLTAGE_LSB * data->gain);
regval = clamp_val(regval, S16_MIN, S16_MAX);
switch (attr) {
@@ -242,7 +315,7 @@ static int ina238_write_in(struct device *dev, u32 attr, int channel,
case 1:
/* signed value, positive values only. Clamp to max 102.396 V */
regval = clamp_val(val, 0, 102396);
- regval = (regval * 1000) / INA238_BUS_VOLTAGE_LSB;
+ regval = (regval * 1000) / data->config->bus_voltage_lsb;
regval = clamp_val(regval, 0, S16_MAX);
switch (attr) {
@@ -297,8 +370,19 @@ static int ina238_read_power(struct device *dev, u32 attr, long *val)
return err;
/* Fixed 1mA lsb, scaled by 1000000 to have result in uW */
- power = div_u64(regval * 1000ULL * INA238_FIXED_SHUNT *
- data->gain, 20 * data->rshunt);
+ power = div_u64(regval * 1000ULL * INA238_FIXED_SHUNT * data->gain *
+ data->config->power_calculate_factor, 4 * 100 * data->rshunt);
+ /* Clamp value to maximum value of long */
+ *val = clamp_val(power, 0, LONG_MAX);
+ break;
+ case hwmon_power_input_highest:
+ err = ina238_read_reg24(data->client, SQ52206_POWER_PEAK, &regval);
+ if (err)
+ return err;
+
+ /* Fixed 1mA lsb, scaled by 1000000 to have result in uW */
+ power = div_u64(regval * 1000ULL * INA238_FIXED_SHUNT * data->gain *
+ data->config->power_calculate_factor, 4 * 100 * data->rshunt);
/* Clamp value to maximum value of long */
*val = clamp_val(power, 0, LONG_MAX);
break;
@@ -311,8 +395,8 @@ static int ina238_read_power(struct device *dev, u32 attr, long *val)
* Truncated 24-bit compare register, lower 8-bits are
* truncated. Same conversion to/from uW as POWER register.
*/
- power = div_u64((regval << 8) * 1000ULL * INA238_FIXED_SHUNT *
- data->gain, 20 * data->rshunt);
+ power = div_u64((regval << 8) * 1000ULL * INA238_FIXED_SHUNT * data->gain *
+ data->config->power_calculate_factor, 4 * 100 * data->rshunt);
/* Clamp value to maximum value of long */
*val = clamp_val(power, 0, LONG_MAX);
break;
@@ -344,8 +428,8 @@ static int ina238_write_power(struct device *dev, u32 attr, long val)
* register.
*/
regval = clamp_val(val, 0, LONG_MAX);
- regval = div_u64(val * 20ULL * data->rshunt,
- 1000ULL * INA238_FIXED_SHUNT * data->gain);
+ regval = div_u64(val * 4 * 100 * data->rshunt, data->config->power_calculate_factor *
+ 1000ULL * INA238_FIXED_SHUNT * data->gain);
regval = clamp_val(regval >> 8, 0, U16_MAX);
return regmap_write(data->regmap, INA238_POWER_LIMIT, regval);
@@ -362,17 +446,17 @@ static int ina238_read_temp(struct device *dev, u32 attr, long *val)
err = regmap_read(data->regmap, INA238_DIE_TEMP, &regval);
if (err)
return err;
-
- /* Signed, bits 15-4 of register, result in mC */
- *val = ((s16)regval >> 4) * INA238_DIE_TEMP_LSB;
+ /* Signed, result in mC */
+ *val = div_s64(((s64)((s16)regval) >> data->config->temp_shift) *
+ (s64)data->config->temp_lsb, 10000);
break;
case hwmon_temp_max:
err = regmap_read(data->regmap, INA238_TEMP_LIMIT, &regval);
if (err)
return err;
-
- /* Signed, bits 15-4 of register, result in mC */
- *val = ((s16)regval >> 4) * INA238_DIE_TEMP_LSB;
+ /* Signed, result in mC */
+ *val = div_s64(((s64)((s16)regval) >> data->config->temp_shift) *
+ (s64)data->config->temp_lsb, 10000);
break;
case hwmon_temp_max_alarm:
err = regmap_read(data->regmap, INA238_DIAG_ALERT, &regval);
@@ -396,13 +480,33 @@ static int ina238_write_temp(struct device *dev, u32 attr, long val)
if (attr != hwmon_temp_max)
return -EOPNOTSUPP;
- /* Signed, bits 15-4 of register */
- regval = (val / INA238_DIE_TEMP_LSB) << 4;
- regval = clamp_val(regval, S16_MIN, S16_MAX) & 0xfff0;
+ /* Signed */
+ regval = clamp_val(val, -40000, 125000);
+ regval = div_s64(regval * 10000, data->config->temp_lsb) << data->config->temp_shift;
+ regval = clamp_val(regval, S16_MIN, S16_MAX) & (0xffff << data->config->temp_shift);
return regmap_write(data->regmap, INA238_TEMP_LIMIT, regval);
}
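A worked instance of the temperature conversion above (constants from this patch): on the INA238, temp_shift = 4 and temp_lsb = 1250000, so a raw reading of 0x0640 gives (0x0640 >> 4) * 1250000 / 10000 = 100 * 125 = 12500 mC, i.e. 12.5 degC; on the SQ52206, temp_shift = 0 and temp_lsb = 78125, so each LSB is 7.8125 mC. As a standalone sketch:

#include <linux/math64.h>
#include <linux/types.h>

/* Illustrative mirror of the conversion: result in mC. */
static long example_die_temp_mc(s16 regval, u8 shift, int lsb)
{
	return div_s64(((s64)regval >> shift) * lsb, 10000);
}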
+static ssize_t energy1_input_show(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct ina238_data *data = dev_get_drvdata(dev);
+ int ret;
+ u64 regval;
+ u64 energy;
+
+ ret = ina238_read_reg40(data->client, SQ52206_ENERGY, &regval);
+ if (ret)
+ return ret;
+
+ /* result in mJ */
+ energy = div_u64(regval * INA238_FIXED_SHUNT * data->gain * 16 *
+ data->config->power_calculate_factor, 4 * 100 * data->rshunt);
+
+ return sysfs_emit(buf, "%llu\n", energy);
+}
+
static int ina238_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val)
{
@@ -422,7 +526,7 @@ static int ina238_read(struct device *dev, enum hwmon_sensor_types type,
}
static int ina238_write(struct device *dev, enum hwmon_sensor_types type,
- u32 attr, int channel, long val)
+ u32 attr, int channel, long val)
{
struct ina238_data *data = dev_get_drvdata(dev);
int err;
@@ -452,6 +556,9 @@ static umode_t ina238_is_visible(const void *drvdata,
enum hwmon_sensor_types type,
u32 attr, int channel)
{
+ const struct ina238_data *data = drvdata;
+ bool has_power_highest = data->config->has_power_highest;
+
switch (type) {
case hwmon_in:
switch (attr) {
@@ -479,6 +586,10 @@ static umode_t ina238_is_visible(const void *drvdata,
return 0444;
case hwmon_power_max:
return 0644;
+ case hwmon_power_input_highest:
+ if (has_power_highest)
+ return 0444;
+ return 0;
default:
return 0;
}
@@ -512,7 +623,8 @@ static const struct hwmon_channel_info * const ina238_info[] = {
HWMON_C_INPUT),
HWMON_CHANNEL_INFO(power,
/* 0: power */
- HWMON_P_INPUT | HWMON_P_MAX | HWMON_P_MAX_ALARM),
+ HWMON_P_INPUT | HWMON_P_MAX |
+ HWMON_P_MAX_ALARM | HWMON_P_INPUT_HIGHEST),
HWMON_CHANNEL_INFO(temp,
/* 0: die temperature */
HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_ALARM),
@@ -530,20 +642,35 @@ static const struct hwmon_chip_info ina238_chip_info = {
.info = ina238_info,
};
+/* energy attributes are 5 bytes wide so we need u64 */
+static DEVICE_ATTR_RO(energy1_input);
+
+static struct attribute *ina238_attrs[] = {
+ &dev_attr_energy1_input.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ina238);
+
static int ina238_probe(struct i2c_client *client)
{
struct ina2xx_platform_data *pdata = dev_get_platdata(&client->dev);
struct device *dev = &client->dev;
struct device *hwmon_dev;
struct ina238_data *data;
+ enum ina238_ids chip;
int config;
int ret;
+ chip = (uintptr_t)i2c_get_match_data(client);
+
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->client = client;
+ /* set the device type */
+ data->config = &ina238_config[chip];
+
mutex_init(&data->config_lock);
data->regmap = devm_regmap_init_i2c(client, &ina238_regmap_config);
@@ -564,15 +691,21 @@ static int ina238_probe(struct i2c_client *client)
/* load shunt gain value */
if (device_property_read_u32(dev, "ti,shunt-gain", &data->gain) < 0)
data->gain = 4; /* Default of ADCRANGE = 0 */
- if (data->gain != 1 && data->gain != 4) {
+ if (data->gain != 1 && data->gain != 2 && data->gain != 4) {
dev_err(dev, "invalid shunt gain value %u\n", data->gain);
return -EINVAL;
}
/* Setup CONFIG register */
- config = INA238_CONFIG_DEFAULT;
- if (data->gain == 1)
+ config = data->config->config_default;
+ if (chip == sq52206) {
+ if (data->gain == 1)
+ config |= SQ52206_CONFIG_ADCRANGE_HIGH; /* ADCRANGE = 10/11 is /1 */
+ else if (data->gain == 2)
+ config |= SQ52206_CONFIG_ADCRANGE_LOW; /* ADCRANGE = 01 is /2 */
+ } else if (data->gain == 1) {
config |= INA238_CONFIG_ADCRANGE; /* ADCRANGE = 1 is /1 */
+ }
ret = regmap_write(data->regmap, INA238_CONFIG, config);
if (ret < 0) {
dev_err(dev, "error configuring the device: %d\n", ret);
@@ -605,7 +738,8 @@ static int ina238_probe(struct i2c_client *client)
hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, data,
&ina238_chip_info,
- NULL);
+ data->config->has_energy ?
+ ina238_groups : NULL);
if (IS_ERR(hwmon_dev))
return PTR_ERR(hwmon_dev);
@@ -616,15 +750,27 @@ static int ina238_probe(struct i2c_client *client)
}
static const struct i2c_device_id ina238_id[] = {
- { "ina238" },
+ { "ina237", ina237 },
+ { "ina238", ina238 },
+ { "sq52206", sq52206 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ina238_id);
static const struct of_device_id __maybe_unused ina238_of_match[] = {
- { .compatible = "ti,ina237" },
- { .compatible = "ti,ina238" },
- { },
+ {
+ .compatible = "ti,ina237",
+ .data = (void *)ina237
+ },
+ {
+ .compatible = "ti,ina238",
+ .data = (void *)ina238
+ },
+ {
+ .compatible = "silergy,sq52206",
+ .data = (void *)sq52206
+ },
+ { }
};
MODULE_DEVICE_TABLE(of, ina238_of_match);
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index 345fe7db9de9..bc3c1f7314b3 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -959,8 +959,12 @@ static int ina2xx_probe(struct i2c_client *client)
return PTR_ERR(data->regmap);
}
- ret = devm_regulator_get_enable(dev, "vs");
- if (ret)
+ /*
+ * Regulator core returns -ENODEV if the 'vs' regulator is not available.
+ * Hence the check for -ENODEV return code is necessary.
+ */
+ ret = devm_regulator_get_enable_optional(dev, "vs");
+ if (ret < 0 && ret != -ENODEV)
return dev_err_probe(dev, ret, "failed to enable vs regulator\n");
ret = ina2xx_init(dev, data);
diff --git a/drivers/hwmon/isl28022.c b/drivers/hwmon/isl28022.c
index 1fb9864635db..c2e559dde63f 100644
--- a/drivers/hwmon/isl28022.c
+++ b/drivers/hwmon/isl28022.c
@@ -154,6 +154,7 @@ static int isl28022_read_current(struct device *dev, u32 attr, long *val)
struct isl28022_data *data = dev_get_drvdata(dev);
unsigned int regval;
int err;
+ u16 sign_bit;
switch (attr) {
case hwmon_curr_input:
@@ -161,8 +162,9 @@ static int isl28022_read_current(struct device *dev, u32 attr, long *val)
ISL28022_REG_CURRENT, &regval);
if (err < 0)
return err;
- *val = ((long)regval * 1250L * (long)data->gain) /
- (long)data->shunt;
+ sign_bit = (regval >> 15) & 0x01;
+ *val = (((long)(((u16)regval) & 0x7FFF) - (sign_bit * 32768)) *
+ 1250L * (long)data->gain) / (long)data->shunt;
break;
default:
return -EOPNOTSUPP;
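The open-coded sign handling above (masking off bit 15 and subtracting 32768 when it was set) is exactly two's-complement sign extension. A sketch of the equivalent, more idiomatic form; the equivalence is an editorial observation, not part of this patch:

#include <linux/bitops.h>
#include <linux/types.h>

/* Illustrative: same arithmetic via sign_extend32(). */
static long example_isl28022_current(u16 regval, long gain, long shunt)
{
	return sign_extend32(regval, 15) * 1250L * gain / shunt;
}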
@@ -301,7 +303,7 @@ static const struct regmap_config isl28022_regmap_config = {
.writeable_reg = isl28022_is_writeable_reg,
.volatile_reg = isl28022_is_volatile_reg,
.val_format_endian = REGMAP_ENDIAN_BIG,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.use_single_read = true,
.use_single_write = true,
};
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 472bcf6092f6..babf2413d666 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -503,6 +503,13 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
k10temp_get_ccd_support(data, 12);
break;
}
+ } else if (boot_cpu_data.x86 == 0x1a) {
+ switch (boot_cpu_data.x86_model) {
+ case 0x40 ... 0x4f: /* Zen5 Ryzen Desktop */
+ data->ccd_offset = 0x308;
+ k10temp_get_ccd_support(data, 8);
+ break;
+ }
}
for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) {
diff --git a/drivers/hwmon/kbatt.c b/drivers/hwmon/kbatt.c
new file mode 100644
index 000000000000..501b8f4ded33
--- /dev/null
+++ b/drivers/hwmon/kbatt.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 KEBA Industrial Automation GmbH
+ *
+ * Driver for KEBA battery monitoring controller FPGA IP core
+ */
+
+#include <linux/hwmon.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/misc/keba.h>
+#include <linux/mutex.h>
+
+#define KBATT "kbatt"
+
+#define KBATT_CONTROL_REG 0x4
+#define KBATT_CONTROL_BAT_TEST 0x01
+
+#define KBATT_STATUS_REG 0x8
+#define KBATT_STATUS_BAT_OK 0x01
+
+#define KBATT_MAX_UPD_INTERVAL (10 * HZ)
+#define KBATT_SETTLE_TIME_US (100 * USEC_PER_MSEC)
+
+struct kbatt {
+ /* update lock */
+ struct mutex lock;
+ void __iomem *base;
+
+ unsigned long next_update; /* in jiffies */
+ bool alarm;
+};
+
+static bool kbatt_alarm(struct kbatt *kbatt)
+{
+ mutex_lock(&kbatt->lock);
+
+ if (!kbatt->next_update || time_after(jiffies, kbatt->next_update)) {
+ /* switch load on */
+ iowrite8(KBATT_CONTROL_BAT_TEST,
+ kbatt->base + KBATT_CONTROL_REG);
+
+ /* wait some time to let things settle */
+ fsleep(KBATT_SETTLE_TIME_US);
+
+ /* check battery state */
+ if (ioread8(kbatt->base + KBATT_STATUS_REG) &
+ KBATT_STATUS_BAT_OK)
+ kbatt->alarm = false;
+ else
+ kbatt->alarm = true;
+
+ /* switch load off */
+ iowrite8(0, kbatt->base + KBATT_CONTROL_REG);
+
+ kbatt->next_update = jiffies + KBATT_MAX_UPD_INTERVAL;
+ }
+
+ mutex_unlock(&kbatt->lock);
+
+ return kbatt->alarm;
+}
+
+static int kbatt_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct kbatt *kbatt = dev_get_drvdata(dev);
+
+ *val = kbatt_alarm(kbatt) ? 1 : 0;
+
+ return 0;
+}
+
+static umode_t kbatt_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (channel == 0 && attr == hwmon_in_min_alarm)
+ return 0444;
+
+ return 0;
+}
+
+static const struct hwmon_channel_info *kbatt_info[] = {
+ HWMON_CHANNEL_INFO(in,
+ /* 0: input minimum alarm channel */
+ HWMON_I_MIN_ALARM),
+ NULL
+};
+
+static const struct hwmon_ops kbatt_hwmon_ops = {
+ .is_visible = kbatt_is_visible,
+ .read = kbatt_read,
+};
+
+static const struct hwmon_chip_info kbatt_chip_info = {
+ .ops = &kbatt_hwmon_ops,
+ .info = kbatt_info,
+};
+
+static int kbatt_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *id)
+{
+ struct keba_batt_auxdev *kbatt_auxdev =
+ container_of(auxdev, struct keba_batt_auxdev, auxdev);
+ struct device *dev = &auxdev->dev;
+ struct device *hwmon_dev;
+ struct kbatt *kbatt;
+ int retval;
+
+ kbatt = devm_kzalloc(dev, sizeof(*kbatt), GFP_KERNEL);
+ if (!kbatt)
+ return -ENOMEM;
+
+ retval = devm_mutex_init(dev, &kbatt->lock);
+ if (retval)
+ return retval;
+
+ kbatt->base = devm_ioremap_resource(dev, &kbatt_auxdev->io);
+ if (IS_ERR(kbatt->base))
+ return PTR_ERR(kbatt->base);
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, KBATT, kbatt,
+ &kbatt_chip_info,
+ NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct auxiliary_device_id kbatt_devtype_aux[] = {
+ { .name = "keba.batt" },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, kbatt_devtype_aux);
+
+static struct auxiliary_driver kbatt_driver_aux = {
+ .name = KBATT,
+ .id_table = kbatt_devtype_aux,
+ .probe = kbatt_probe,
+};
+module_auxiliary_driver(kbatt_driver_aux);
+
+MODULE_AUTHOR("Petar Bojanic <boja@keba.com>");
+MODULE_AUTHOR("Gerhard Engleder <eg@keba.com>");
+MODULE_DESCRIPTION("KEBA battery monitoring controller driver");
+MODULE_LICENSE("GPL");
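A quick way to exercise the new driver is to read the alarm attribute it exposes; channel 0 of the in sensor with HWMON_I_MIN_ALARM surfaces as in0_min_alarm. A user-space sketch (the hwmon index and path vary per system, so both are illustrative):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/hwmon/hwmon0/in0_min_alarm", "r");
	int alarm = 0;

	if (!f)
		return 1;
	if (fscanf(f, "%d", &alarm) != 1)
		alarm = 0;
	fclose(f);
	printf("battery %s\n", alarm ? "LOW" : "ok");
	return 0;
}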
diff --git a/drivers/hwmon/kfan.c b/drivers/hwmon/kfan.c
new file mode 100644
index 000000000000..f353acb66749
--- /dev/null
+++ b/drivers/hwmon/kfan.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 KEBA Industrial Automation GmbH
+ *
+ * Driver for KEBA fan controller FPGA IP core
+ *
+ */
+
+#include <linux/hwmon.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/misc/keba.h>
+
+#define KFAN "kfan"
+
+#define KFAN_CONTROL_REG 0x04
+
+#define KFAN_STATUS_REG 0x08
+#define KFAN_STATUS_PRESENT 0x01
+#define KFAN_STATUS_REGULABLE 0x02
+#define KFAN_STATUS_TACHO 0x04
+#define KFAN_STATUS_BLOCKED 0x08
+
+#define KFAN_TACHO_REG 0x0c
+
+#define KFAN_DEFAULT_DIV 2
+
+struct kfan {
+ void __iomem *base;
+ bool tacho;
+ bool regulable;
+
+ /* hwmon API configuration */
+ u32 fan_channel_config[2];
+ struct hwmon_channel_info fan_info;
+ u32 pwm_channel_config[2];
+ struct hwmon_channel_info pwm_info;
+ const struct hwmon_channel_info *info[3];
+ struct hwmon_chip_info chip;
+};
+
+static bool kfan_get_fault(struct kfan *kfan)
+{
+ u8 status = ioread8(kfan->base + KFAN_STATUS_REG);
+
+ if (!(status & KFAN_STATUS_PRESENT))
+ return true;
+
+ if (!kfan->tacho && (status & KFAN_STATUS_BLOCKED))
+ return true;
+
+ return false;
+}
+
+static unsigned int kfan_count_to_rpm(u16 count)
+{
+ if (count == 0 || count == 0xffff)
+ return 0;
+
+ return 5000000UL / (KFAN_DEFAULT_DIV * count);
+}
+
+static unsigned int kfan_get_rpm(struct kfan *kfan)
+{
+ unsigned int rpm;
+ u16 count;
+
+ count = ioread16(kfan->base + KFAN_TACHO_REG);
+ rpm = kfan_count_to_rpm(count);
+
+ return rpm;
+}
+
+static unsigned int kfan_get_pwm(struct kfan *kfan)
+{
+ return ioread8(kfan->base + KFAN_CONTROL_REG);
+}
+
+static int kfan_set_pwm(struct kfan *kfan, long val)
+{
+ if (val < 0 || val > 0xff)
+ return -EINVAL;
+
+ /* if non-regulable, only 0 or 0xff can be written */
+ if (!kfan->regulable && val > 0)
+ val = 0xff;
+
+ iowrite8(val, kfan->base + KFAN_CONTROL_REG);
+
+ return 0;
+}
+
+static int kfan_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct kfan *kfan = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ return kfan_set_pwm(kfan, val);
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int kfan_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct kfan *kfan = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_fault:
+ *val = kfan_get_fault(kfan);
+ return 0;
+ case hwmon_fan_input:
+ *val = kfan_get_rpm(kfan);
+ return 0;
+ default:
+ break;
+ }
+ break;
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ *val = kfan_get_pwm(kfan);
+ return 0;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static umode_t kfan_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_input:
+ return 0444;
+ case hwmon_fan_fault:
+ return 0444;
+ default:
+ break;
+ }
+ break;
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ return 0644;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static const struct hwmon_ops kfan_hwmon_ops = {
+ .is_visible = kfan_is_visible,
+ .read = kfan_read,
+ .write = kfan_write,
+};
+
+static int kfan_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *id)
+{
+ struct keba_fan_auxdev *kfan_auxdev =
+ container_of(auxdev, struct keba_fan_auxdev, auxdev);
+ struct device *dev = &auxdev->dev;
+ struct device *hwmon_dev;
+ struct kfan *kfan;
+ u8 status;
+
+ kfan = devm_kzalloc(dev, sizeof(*kfan), GFP_KERNEL);
+ if (!kfan)
+ return -ENOMEM;
+
+ kfan->base = devm_ioremap_resource(dev, &kfan_auxdev->io);
+ if (IS_ERR(kfan->base))
+ return PTR_ERR(kfan->base);
+
+ status = ioread8(kfan->base + KFAN_STATUS_REG);
+ if (status & KFAN_STATUS_REGULABLE)
+ kfan->regulable = true;
+ if (status & KFAN_STATUS_TACHO)
+ kfan->tacho = true;
+
+ /* fan */
+ kfan->fan_channel_config[0] = HWMON_F_FAULT;
+ if (kfan->tacho)
+ kfan->fan_channel_config[0] |= HWMON_F_INPUT;
+ kfan->fan_info.type = hwmon_fan;
+ kfan->fan_info.config = kfan->fan_channel_config;
+ kfan->info[0] = &kfan->fan_info;
+
+ /* PWM */
+ kfan->pwm_channel_config[0] = HWMON_PWM_INPUT;
+ kfan->pwm_info.type = hwmon_pwm;
+ kfan->pwm_info.config = kfan->pwm_channel_config;
+ kfan->info[1] = &kfan->pwm_info;
+
+ kfan->chip.ops = &kfan_hwmon_ops;
+ kfan->chip.info = kfan->info;
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, KFAN, kfan,
+ &kfan->chip, NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct auxiliary_device_id kfan_devtype_aux[] = {
+ { .name = "keba.fan" },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, kfan_devtype_aux);
+
+static struct auxiliary_driver kfan_driver_aux = {
+ .name = KFAN,
+ .id_table = kfan_devtype_aux,
+ .probe = kfan_probe,
+};
+module_auxiliary_driver(kfan_driver_aux);
+
+MODULE_AUTHOR("Petar Bojanic <boja@keba.com>");
+MODULE_AUTHOR("Gerhard Engleder <eg@keba.com>");
+MODULE_DESCRIPTION("KEBA fan controller driver");
+MODULE_LICENSE("GPL");
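A worked instance of kfan_count_to_rpm() above: the constant 5000000UL suggests the tachometer counts against a 5 MHz reference divided by KFAN_DEFAULT_DIV = 2 (an inference from the constant, not stated in the patch), so a count of 2500 converts to 5000000 / (2 * 2500) = 1000 RPM, and the sentinel counts 0 and 0xffff are reported as 0 RPM. A standalone sketch of the same arithmetic:

/* Illustrative mirror of kfan_count_to_rpm(). */
static unsigned int example_count_to_rpm(unsigned short count)
{
	if (count == 0 || count == 0xffff)
		return 0;	/* no valid measurement */
	return 5000000UL / (2 * count);
}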
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index d95a3c6c245c..9b4875e2fd8d 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -622,7 +622,7 @@ static int lm75_i3c_reg_read(void *context, unsigned int reg, unsigned int *val)
{
.rnw = true,
.len = 2,
- .data.out = data->val_buf,
+ .data.in = data->val_buf,
},
};
int ret;
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 75f09553fd67..c1f528e292f3 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -1235,7 +1235,7 @@ static int lm90_update_alarms(struct lm90_data *data, bool force)
static void lm90_alert_work(struct work_struct *__work)
{
- struct delayed_work *delayed_work = container_of(__work, struct delayed_work, work);
+ struct delayed_work *delayed_work = to_delayed_work(__work);
struct lm90_data *data = container_of(delayed_work, struct lm90_data, alert_work);
/* Nothing to do if alerts are enabled */
diff --git a/drivers/hwmon/ltc2992.c b/drivers/hwmon/ltc2992.c
index 541fa09dc6e7..a07e2eb93c71 100644
--- a/drivers/hwmon/ltc2992.c
+++ b/drivers/hwmon/ltc2992.c
@@ -256,33 +256,38 @@ static int ltc2992_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask
return 0;
}
-static void ltc2992_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int ltc2992_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct ltc2992_state *st = gpiochip_get_data(chip);
unsigned long gpio_ctrl;
- int reg;
+ int reg, ret;
mutex_lock(&st->gpio_mutex);
reg = ltc2992_read_reg(st, ltc2992_gpio_addr_map[offset].ctrl, 1);
if (reg < 0) {
mutex_unlock(&st->gpio_mutex);
- return;
+ return reg;
}
gpio_ctrl = reg;
assign_bit(ltc2992_gpio_addr_map[offset].ctrl_bit, &gpio_ctrl, value);
- ltc2992_write_reg(st, ltc2992_gpio_addr_map[offset].ctrl, 1, gpio_ctrl);
+ ret = ltc2992_write_reg(st, ltc2992_gpio_addr_map[offset].ctrl, 1,
+ gpio_ctrl);
mutex_unlock(&st->gpio_mutex);
+
+ return ret;
}
-static void ltc2992_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
- unsigned long *bits)
+static int ltc2992_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
{
struct ltc2992_state *st = gpiochip_get_data(chip);
unsigned long gpio_ctrl_io = 0;
unsigned long gpio_ctrl = 0;
unsigned int gpio_nr;
+ int ret;
for_each_set_bit(gpio_nr, mask, LTC2992_GPIO_NR) {
if (gpio_nr < 3)
@@ -293,9 +298,14 @@ static void ltc2992_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mas
}
mutex_lock(&st->gpio_mutex);
- ltc2992_write_reg(st, LTC2992_GPIO_IO_CTRL, 1, gpio_ctrl_io);
- ltc2992_write_reg(st, LTC2992_GPIO_CTRL, 1, gpio_ctrl);
+ ret = ltc2992_write_reg(st, LTC2992_GPIO_IO_CTRL, 1, gpio_ctrl_io);
+ if (ret)
+ goto out;
+
+ ret = ltc2992_write_reg(st, LTC2992_GPIO_CTRL, 1, gpio_ctrl);
+out:
mutex_unlock(&st->gpio_mutex);
+ return ret;
}
static int ltc2992_config_gpio(struct ltc2992_state *st)
@@ -329,8 +339,8 @@ static int ltc2992_config_gpio(struct ltc2992_state *st)
st->gc.ngpio = ARRAY_SIZE(st->gpio_names);
st->gc.get = ltc2992_gpio_get;
st->gc.get_multiple = ltc2992_gpio_get_multiple;
- st->gc.set = ltc2992_gpio_set;
- st->gc.set_multiple = ltc2992_gpio_set_multiple;
+ st->gc.set_rv = ltc2992_gpio_set;
+ st->gc.set_multiple_rv = ltc2992_gpio_set_multiple;
ret = devm_gpiochip_add_data(&st->client->dev, &st->gc, st);
if (ret)
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
index 32b4d54b2076..a06346496e1d 100644
--- a/drivers/hwmon/max6639.c
+++ b/drivers/hwmon/max6639.c
@@ -80,6 +80,7 @@ struct max6639_data {
/* Register values initialized only once */
u8 ppr[MAX6639_NUM_CHANNELS]; /* Pulses per rotation 0..3 for 1..4 ppr */
u8 rpm_range[MAX6639_NUM_CHANNELS]; /* Index in above rpm_ranges table */
+ u32 target_rpm[MAX6639_NUM_CHANNELS];
/* Optional regulator for FAN supply */
struct regulator *reg;
@@ -563,6 +564,10 @@ static int max6639_probe_child_from_dt(struct i2c_client *client,
if (!err)
data->rpm_range[i] = rpm_range_to_reg(val);
+ err = of_property_read_u32(child, "target-rpm", &val);
+ if (!err)
+ data->target_rpm[i] = val;
+
return 0;
}
@@ -573,6 +578,7 @@ static int max6639_init_client(struct i2c_client *client,
const struct device_node *np = dev->of_node;
struct device_node *child;
int i, err;
+ u8 target_duty;
/* Reset chip to default values, see below for GCONFIG setup */
err = regmap_write(data->regmap, MAX6639_REG_GCONFIG, MAX6639_GCONFIG_POR);
@@ -586,6 +592,8 @@ static int max6639_init_client(struct i2c_client *client,
/* default: 4000 RPM */
data->rpm_range[0] = 1;
data->rpm_range[1] = 1;
+ data->target_rpm[0] = 4000;
+ data->target_rpm[1] = 4000;
for_each_child_of_node(np, child) {
if (strcmp(child->name, "fan"))
@@ -639,8 +647,12 @@ static int max6639_init_client(struct i2c_client *client,
if (err)
return err;
- /* PWM 120/120 (i.e. 100%) */
- err = regmap_write(data->regmap, MAX6639_REG_TARGTDUTY(i), 120);
+ /* Set PWM based on target RPM if specified */
+ if (data->target_rpm[i] > rpm_ranges[data->rpm_range[i]])
+ data->target_rpm[i] = rpm_ranges[data->rpm_range[i]];
+
+ target_duty = 120 * data->target_rpm[i] / rpm_ranges[data->rpm_range[i]];
+ err = regmap_write(data->regmap, MAX6639_REG_TARGTDUTY(i), target_duty);
if (err)
return err;
}
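A worked instance of the target-duty computation above: TARGTDUTY is full scale at 120, so with the default 4000 RPM range and a target-rpm of 2000 the register is set to 120 * 2000 / 4000 = 60, i.e. a 50% duty cycle; a target above the range is first clamped to the range, reproducing the old 120/120 behaviour. As a standalone sketch:

/* Illustrative: clamp target RPM to the range, scale to 0..120 duty. */
static unsigned char example_target_duty(unsigned int target_rpm,
					 unsigned int range_rpm)
{
	if (target_rpm > range_rpm)
		target_rpm = range_rpm;
	return 120 * target_rpm / range_rpm;
}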
diff --git a/drivers/hwmon/max77705-hwmon.c b/drivers/hwmon/max77705-hwmon.c
new file mode 100644
index 000000000000..990023e6474e
--- /dev/null
+++ b/drivers/hwmon/max77705-hwmon.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MAX77705 voltage and current hwmon driver.
+ *
+ * Copyright (C) 2025 Dzmitry Sankouski <dsankouski@gmail.com>
+ */
+
+#include <linux/err.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/hwmon.h>
+#include <linux/kernel.h>
+#include <linux/mfd/max77705-private.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+struct channel_desc {
+ u8 reg;
+ u8 avg_reg;
+ const char *const label;
+ /* register resolution: nanovolts for voltage, nanoamperes for current */
+ u32 resolution;
+};
+
+static const struct channel_desc current_channel_desc[] = {
+ {
+ .reg = IIN_REG,
+ .label = "IIN_REG",
+ .resolution = 125000
+ },
+ {
+ .reg = ISYS_REG,
+ .avg_reg = AVGISYS_REG,
+ .label = "ISYS_REG",
+ .resolution = 312500
+ }
+};
+
+static const struct channel_desc voltage_channel_desc[] = {
+ {
+ .reg = VBYP_REG,
+ .label = "VBYP_REG",
+ .resolution = 427246
+ },
+ {
+ .reg = VSYS_REG,
+ .label = "VSYS_REG",
+ .resolution = 156250
+ }
+};
+
+static int max77705_read_and_convert(struct regmap *regmap, u8 reg, u32 res,
+ bool is_signed, long *val)
+{
+ int ret;
+ u32 regval;
+
+ ret = regmap_read(regmap, reg, &regval);
+ if (ret < 0)
+ return ret;
+
+ if (is_signed)
+ *val = mult_frac((long)sign_extend32(regval, 15), res, 1000000);
+ else
+ *val = mult_frac((long)regval, res, 1000000);
+
+ return 0;
+}
+
+static umode_t max77705_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ case hwmon_in_label:
+ return 0444;
+ default:
+ break;
+ }
+ break;
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+ case hwmon_curr_label:
+ return 0444;
+ case hwmon_curr_average:
+ if (current_channel_desc[channel].avg_reg)
+ return 0444;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int max77705_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, const char **buf)
+{
+ switch (type) {
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_label:
+ *buf = current_channel_desc[channel].label;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_label:
+ *buf = voltage_channel_desc[channel].label;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int max77705_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct regmap *regmap = dev_get_drvdata(dev);
+ u8 reg;
+ u32 res;
+
+ switch (type) {
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+ reg = current_channel_desc[channel].reg;
+ res = current_channel_desc[channel].resolution;
+
+ return max77705_read_and_convert(regmap, reg, res, true, val);
+ case hwmon_curr_average:
+ reg = current_channel_desc[channel].avg_reg;
+ res = current_channel_desc[channel].resolution;
+
+ return max77705_read_and_convert(regmap, reg, res, true, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ reg = voltage_channel_desc[channel].reg;
+ res = voltage_channel_desc[channel].resolution;
+
+ return max77705_read_and_convert(regmap, reg, res, false, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct hwmon_ops max77705_hwmon_ops = {
+ .is_visible = max77705_is_visible,
+ .read = max77705_read,
+ .read_string = max77705_read_string,
+};
+
+static const struct hwmon_channel_info *max77705_info[] = {
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL
+ ),
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_AVERAGE | HWMON_C_LABEL
+ ),
+ NULL
+};
+
+static const struct hwmon_chip_info max77705_chip_info = {
+ .ops = &max77705_hwmon_ops,
+ .info = max77705_info,
+};
+
+static int max77705_hwmon_probe(struct platform_device *pdev)
+{
+ struct device *hwmon_dev;
+ struct regmap *regmap;
+
+ regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!regmap)
+ return -ENODEV;
+
+ hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev, "max77705", regmap,
+ &max77705_chip_info, NULL);
+ if (IS_ERR(hwmon_dev))
+ return dev_err_probe(&pdev->dev, PTR_ERR(hwmon_dev),
+ "Unable to register hwmon device\n");
+
+ return 0;
+}
+
+static struct platform_driver max77705_hwmon_driver = {
+ .driver = {
+ .name = "max77705-hwmon",
+ },
+ .probe = max77705_hwmon_probe,
+};
+
+module_platform_driver(max77705_hwmon_driver);
+
+MODULE_AUTHOR("Dzmitry Sankouski <dsankouski@gmail.com>");
+MODULE_DESCRIPTION("MAX77705 hardware monitoring driver");
+MODULE_LICENSE("GPL");
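
The read path above scales the raw 16-bit register value by the per-channel resolution (nanovolts or nanoamperes) down to the millivolt/milliamp units hwmon reports. A minimal standalone sketch of the same arithmetic, re-implementing the kernel's sign_extend32() and mult_frac() helpers for illustration (the raw value is a made-up AVGISYS_REG sample):

#include <stdio.h>
#include <stdint.h>

/*
 * Userspace stand-in for the kernel helper; relies on arithmetic
 * right shift of signed values, as the kernel build does.
 */
static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

/* Same split as the kernel's mult_frac(): avoids overflowing x * n. */
#define mult_frac(x, n, d) ((x) / (d) * (n) + ((x) % (d)) * (n) / (d))

int main(void)
{
	uint32_t raw = 0xff38;	/* hypothetical AVGISYS_REG reading */
	long ma = mult_frac((long)sign_extend32(raw, 15), 312500, 1000000);

	printf("%ld mA\n", ma);	/* prints -62 for this sample */
	return 0;
}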
diff --git a/drivers/hwmon/nct7363.c b/drivers/hwmon/nct7363.c
index be7bf32f6e68..e13ab918b1ab 100644
--- a/drivers/hwmon/nct7363.c
+++ b/drivers/hwmon/nct7363.c
@@ -391,7 +391,7 @@ static const struct regmap_config nct7363_regmap_config = {
.val_bits = 8,
.use_single_read = true,
.use_single_write = true,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.volatile_reg = nct7363_regmap_is_volatile,
};
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index c9b3c3149982..441f984a859d 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -218,6 +218,24 @@ config SENSORS_LM25066_REGULATOR
If you say yes here you get regulator support for National
Semiconductor LM25066, LM5064, and LM5066.
+config SENSORS_LT3074
+ tristate "Analog Devices LT3074"
+ help
+ If you say yes here you get hardware monitoring support for Analog
+ Devices LT3074.
+
+ This driver can also be built as a module. If so, the module will
+ be called lt3074.
+
+config SENSORS_LT3074_REGULATOR
+ tristate "Regulator support for LT3074"
+ depends on SENSORS_LT3074 && REGULATOR
+ help
+ If you say yes here you get regulator support for Analog Devices
+ LT3074. The LT3074 is a low voltage, ultralow noise, high PSRR,
+ low dropout linear regulator. The device supplies up to 3A with a
+ typical dropout voltage of 45mV.
+
config SENSORS_LT7182S
tristate "Analog Devices LT7182S"
help
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index 56f128c4653e..29cd8a3317d2 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_SENSORS_IR38064) += ir38064.o
obj-$(CONFIG_SENSORS_IRPS5401) += irps5401.o
obj-$(CONFIG_SENSORS_ISL68137) += isl68137.o
obj-$(CONFIG_SENSORS_LM25066) += lm25066.o
+obj-$(CONFIG_SENSORS_LT3074) += lt3074.o
obj-$(CONFIG_SENSORS_LT7182S) += lt7182s.o
obj-$(CONFIG_SENSORS_LTC2978) += ltc2978.o
obj-$(CONFIG_SENSORS_LTC3815) += ltc3815.o
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
index 40b0dda32ea6..dd7275a67a0a 100644
--- a/drivers/hwmon/pmbus/lm25066.c
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -437,7 +437,7 @@ static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
#if IS_ENABLED(CONFIG_SENSORS_LM25066_REGULATOR)
static const struct regulator_desc lm25066_reg_desc[] = {
- PMBUS_REGULATOR_ONE("vout"),
+ PMBUS_REGULATOR_ONE_NODE("vout"),
};
#endif
diff --git a/drivers/hwmon/pmbus/lt3074.c b/drivers/hwmon/pmbus/lt3074.c
new file mode 100644
index 000000000000..3704dbe7b54a
--- /dev/null
+++ b/drivers/hwmon/pmbus/lt3074.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hardware monitoring driver for Analog Devices LT3074
+ *
+ * Copyright (C) 2025 Analog Devices, Inc.
+ */
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+
+#include "pmbus.h"
+
+#define LT3074_MFR_READ_VBIAS 0xc6
+#define LT3074_MFR_BIAS_OV_WARN_LIMIT 0xc7
+#define LT3074_MFR_BIAS_UV_WARN_LIMIT 0xc8
+#define LT3074_MFR_SPECIAL_ID 0xe7
+
+#define LT3074_SPECIAL_ID_VALUE 0x1c1d
+
+static const struct regulator_desc __maybe_unused lt3074_reg_desc[] = {
+ PMBUS_REGULATOR_ONE("regulator"),
+};
+
+static int lt3074_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
+{
+ switch (reg) {
+ case PMBUS_VIRT_READ_VMON:
+ return pmbus_read_word_data(client, page, phase,
+ LT3074_MFR_READ_VBIAS);
+ case PMBUS_VIRT_VMON_UV_WARN_LIMIT:
+ return pmbus_read_word_data(client, page, phase,
+ LT3074_MFR_BIAS_UV_WARN_LIMIT);
+ case PMBUS_VIRT_VMON_OV_WARN_LIMIT:
+ return pmbus_read_word_data(client, page, phase,
+ LT3074_MFR_BIAS_OV_WARN_LIMIT);
+ default:
+ return -ENODATA;
+ }
+}
+
+static int lt3074_write_word_data(struct i2c_client *client, int page,
+ int reg, u16 word)
+{
+ switch (reg) {
+ case PMBUS_VIRT_VMON_UV_WARN_LIMIT:
+ return pmbus_write_word_data(client, 0,
+ LT3074_MFR_BIAS_UV_WARN_LIMIT,
+ word);
+ case PMBUS_VIRT_VMON_OV_WARN_LIMIT:
+ return pmbus_write_word_data(client, 0,
+ LT3074_MFR_BIAS_OV_WARN_LIMIT,
+ word);
+ default:
+ return -ENODATA;
+ }
+}
+
+static struct pmbus_driver_info lt3074_info = {
+ .pages = 1,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = linear,
+ .format[PSC_CURRENT_OUT] = linear,
+ .format[PSC_TEMPERATURE] = linear,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_VMON |
+ PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_STATUS_TEMP,
+ .read_word_data = lt3074_read_word_data,
+ .write_word_data = lt3074_write_word_data,
+#if IS_ENABLED(CONFIG_SENSORS_LT3074_REGULATOR)
+ .num_regulators = 1,
+ .reg_desc = lt3074_reg_desc,
+#endif
+};
+
+static int lt3074_probe(struct i2c_client *client)
+{
+ int ret;
+ struct device *dev = &client->dev;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_WORD_DATA))
+ return -ENODEV;
+
+ ret = i2c_smbus_read_word_data(client, LT3074_MFR_SPECIAL_ID);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to read ID\n");
+
+ if (ret != LT3074_SPECIAL_ID_VALUE)
+ return dev_err_probe(dev, -ENODEV, "ID mismatch\n");
+
+ return pmbus_do_probe(client, &lt3074_info);
+}
+
+static const struct i2c_device_id lt3074_id[] = {
+ { "lt3074", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, lt3074_id);
+
+static const struct of_device_id __maybe_unused lt3074_of_match[] = {
+ { .compatible = "adi,lt3074" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, lt3074_of_match);
+
+static struct i2c_driver lt3074_driver = {
+ .driver = {
+ .name = "lt3074",
+ .of_match_table = of_match_ptr(lt3074_of_match),
+ },
+ .probe = lt3074_probe,
+ .id_table = lt3074_id,
+};
+module_i2c_driver(lt3074_driver);
+
+MODULE_AUTHOR("Cedric Encarnacion <cedricjustine.encarnacion@analog.com>");
+MODULE_DESCRIPTION("PMBus driver for Analog Devices LT3074");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c
index c9dda33831ff..56834d26f8ef 100644
--- a/drivers/hwmon/pmbus/max34440.c
+++ b/drivers/hwmon/pmbus/max34440.c
@@ -12,9 +12,26 @@
#include <linux/init.h>
#include <linux/err.h>
#include <linux/i2c.h>
+#include <linux/delay.h>
#include "pmbus.h"
-enum chips { max34440, max34441, max34446, max34451, max34460, max34461 };
+enum chips {
+ adpm12160,
+ max34440,
+ max34441,
+ max34446,
+ max34451,
+ max34460,
+ max34461,
+};
+
+/*
+ * The firmware is sometimes not ready if we try to read data
+ * from a page immediately after selecting it. Maxim recommends
+ * a 50us delay because the chip fails to clock stretch long
+ * enough here.
+ */
+#define MAX34440_PAGE_CHANGE_DELAY 50
#define MAX34440_MFR_VOUT_PEAK 0xd4
#define MAX34440_MFR_IOUT_PEAK 0xd5
@@ -34,16 +51,21 @@ enum chips { max34440, max34441, max34446, max34451, max34460, max34461 };
/*
* The whole max344* family have IOUT_OC_WARN_LIMIT and IOUT_OC_FAULT_LIMIT
* swapped from the standard pmbus spec addresses.
+ * For max34451, version MAX34451ETNA6+ and later has this issue fixed.
*/
#define MAX34440_IOUT_OC_WARN_LIMIT 0x46
#define MAX34440_IOUT_OC_FAULT_LIMIT 0x4A
+#define MAX34451ETNA6_MFR_REV 0x0012
+
#define MAX34451_MFR_CHANNEL_CONFIG 0xe4
#define MAX34451_MFR_CHANNEL_CONFIG_SEL_MASK 0x3f
struct max34440_data {
int id;
struct pmbus_driver_info info;
+ u8 iout_oc_warn_limit;
+ u8 iout_oc_fault_limit;
};
#define to_max34440_data(x) container_of(x, struct max34440_data, info)
@@ -60,11 +82,11 @@ static int max34440_read_word_data(struct i2c_client *client, int page,
switch (reg) {
case PMBUS_IOUT_OC_FAULT_LIMIT:
ret = pmbus_read_word_data(client, page, phase,
- MAX34440_IOUT_OC_FAULT_LIMIT);
+ data->iout_oc_fault_limit);
break;
case PMBUS_IOUT_OC_WARN_LIMIT:
ret = pmbus_read_word_data(client, page, phase,
- MAX34440_IOUT_OC_WARN_LIMIT);
+ data->iout_oc_warn_limit);
break;
case PMBUS_VIRT_READ_VOUT_MIN:
ret = pmbus_read_word_data(client, page, phase,
@@ -75,7 +97,8 @@ static int max34440_read_word_data(struct i2c_client *client, int page,
MAX34440_MFR_VOUT_PEAK);
break;
case PMBUS_VIRT_READ_IOUT_AVG:
- if (data->id != max34446 && data->id != max34451)
+ if (data->id != max34446 && data->id != max34451 &&
+ data->id != adpm12160)
return -ENXIO;
ret = pmbus_read_word_data(client, page, phase,
MAX34446_MFR_IOUT_AVG);
@@ -133,11 +156,11 @@ static int max34440_write_word_data(struct i2c_client *client, int page,
switch (reg) {
case PMBUS_IOUT_OC_FAULT_LIMIT:
- ret = pmbus_write_word_data(client, page, MAX34440_IOUT_OC_FAULT_LIMIT,
+ ret = pmbus_write_word_data(client, page, data->iout_oc_fault_limit,
word);
break;
case PMBUS_IOUT_OC_WARN_LIMIT:
- ret = pmbus_write_word_data(client, page, MAX34440_IOUT_OC_WARN_LIMIT,
+ ret = pmbus_write_word_data(client, page, data->iout_oc_warn_limit,
word);
break;
case PMBUS_VIRT_RESET_POUT_HISTORY:
@@ -159,7 +182,8 @@ static int max34440_write_word_data(struct i2c_client *client, int page,
case PMBUS_VIRT_RESET_IOUT_HISTORY:
ret = pmbus_write_word_data(client, page,
MAX34440_MFR_IOUT_PEAK, 0);
- if (!ret && (data->id == max34446 || data->id == max34451))
+ if (!ret && (data->id == max34446 || data->id == max34451 ||
+ data->id == adpm12160))
ret = pmbus_write_word_data(client, page,
MAX34446_MFR_IOUT_AVG, 0);
@@ -235,9 +259,29 @@ static int max34451_set_supported_funcs(struct i2c_client *client,
*/
int page, rv;
+ bool max34451_na6 = false;
+
+ rv = i2c_smbus_read_word_data(client, PMBUS_MFR_REVISION);
+ if (rv < 0)
+ return rv;
+
+ if (rv >= MAX34451ETNA6_MFR_REV) {
+ max34451_na6 = true;
+ data->info.format[PSC_VOLTAGE_IN] = direct;
+ data->info.format[PSC_CURRENT_IN] = direct;
+ data->info.m[PSC_VOLTAGE_IN] = 1;
+ data->info.b[PSC_VOLTAGE_IN] = 0;
+ data->info.R[PSC_VOLTAGE_IN] = 3;
+ data->info.m[PSC_CURRENT_IN] = 1;
+ data->info.b[PSC_CURRENT_IN] = 0;
+ data->info.R[PSC_CURRENT_IN] = 2;
+ data->iout_oc_fault_limit = PMBUS_IOUT_OC_FAULT_LIMIT;
+ data->iout_oc_warn_limit = PMBUS_IOUT_OC_WARN_LIMIT;
+ }
for (page = 0; page < 16; page++) {
rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+ fsleep(MAX34440_PAGE_CHANGE_DELAY);
if (rv < 0)
return rv;
@@ -251,16 +295,30 @@ static int max34451_set_supported_funcs(struct i2c_client *client,
case 0x20:
data->info.func[page] = PMBUS_HAVE_VOUT |
PMBUS_HAVE_STATUS_VOUT;
+
+ if (max34451_na6)
+ data->info.func[page] |= PMBUS_HAVE_VIN |
+ PMBUS_HAVE_STATUS_INPUT;
break;
case 0x21:
data->info.func[page] = PMBUS_HAVE_VOUT;
+
+ if (max34451_na6)
+ data->info.func[page] |= PMBUS_HAVE_VIN;
break;
case 0x22:
data->info.func[page] = PMBUS_HAVE_IOUT |
PMBUS_HAVE_STATUS_IOUT;
+
+ if (max34451_na6)
+ data->info.func[page] |= PMBUS_HAVE_IIN |
+ PMBUS_HAVE_STATUS_INPUT;
break;
case 0x23:
data->info.func[page] = PMBUS_HAVE_IOUT;
+
+ if (max34451_na6)
+ data->info.func[page] |= PMBUS_HAVE_IIN;
break;
default:
break;
@@ -271,6 +329,41 @@ static int max34451_set_supported_funcs(struct i2c_client *client,
}
static struct pmbus_driver_info max34440_info[] = {
+ [adpm12160] = {
+ .pages = 19,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_CURRENT_IN] = direct,
+ .format[PSC_CURRENT_OUT] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_VOLTAGE_IN] = 1,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = 0,
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 0,
+ .m[PSC_CURRENT_IN] = 1,
+ .b[PSC_CURRENT_IN] = 0,
+ .R[PSC_CURRENT_IN] = 2,
+ .m[PSC_CURRENT_OUT] = 1,
+ .b[PSC_CURRENT_OUT] = 0,
+ .R[PSC_CURRENT_OUT] = 2,
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 2,
+ /* unlisted pages below [18] are not used for monitoring */
+ .func[2] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[4] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[5] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[6] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[7] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[8] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[9] = PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT,
+ .func[10] = PMBUS_HAVE_IIN | PMBUS_HAVE_STATUS_INPUT,
+ .func[18] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .read_word_data = max34440_read_word_data,
+ .write_word_data = max34440_write_word_data,
+ },
[max34440] = {
.pages = 14,
.format[PSC_VOLTAGE_IN] = direct,
@@ -312,6 +405,7 @@ static struct pmbus_driver_info max34440_info[] = {
.read_byte_data = max34440_read_byte_data,
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
+ .page_change_delay = MAX34440_PAGE_CHANGE_DELAY,
},
[max34441] = {
.pages = 12,
@@ -355,6 +449,7 @@ static struct pmbus_driver_info max34440_info[] = {
.read_byte_data = max34440_read_byte_data,
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
+ .page_change_delay = MAX34440_PAGE_CHANGE_DELAY,
},
[max34446] = {
.pages = 7,
@@ -392,6 +487,7 @@ static struct pmbus_driver_info max34440_info[] = {
.read_byte_data = max34440_read_byte_data,
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
+ .page_change_delay = MAX34440_PAGE_CHANGE_DELAY,
},
[max34451] = {
.pages = 21,
@@ -415,6 +511,7 @@ static struct pmbus_driver_info max34440_info[] = {
.func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
+ .page_change_delay = MAX34440_PAGE_CHANGE_DELAY,
},
[max34460] = {
.pages = 18,
@@ -445,6 +542,7 @@ static struct pmbus_driver_info max34440_info[] = {
.func[17] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
+ .page_change_delay = MAX34440_PAGE_CHANGE_DELAY,
},
[max34461] = {
.pages = 23,
@@ -480,6 +578,7 @@ static struct pmbus_driver_info max34440_info[] = {
.func[21] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
+ .page_change_delay = MAX34440_PAGE_CHANGE_DELAY,
},
};
@@ -494,17 +593,23 @@ static int max34440_probe(struct i2c_client *client)
return -ENOMEM;
data->id = i2c_match_id(max34440_id, client)->driver_data;
data->info = max34440_info[data->id];
+ data->iout_oc_fault_limit = MAX34440_IOUT_OC_FAULT_LIMIT;
+ data->iout_oc_warn_limit = MAX34440_IOUT_OC_WARN_LIMIT;
if (data->id == max34451) {
rv = max34451_set_supported_funcs(client, data);
if (rv)
return rv;
+ } else if (data->id == adpm12160) {
+ data->iout_oc_fault_limit = PMBUS_IOUT_OC_FAULT_LIMIT;
+ data->iout_oc_warn_limit = PMBUS_IOUT_OC_WARN_LIMIT;
}
return pmbus_do_probe(client, &data->info);
}
static const struct i2c_device_id max34440_id[] = {
+ {"adpm12160", adpm12160},
{"max34440", max34440},
{"max34441", max34441},
{"max34446", max34446},
diff --git a/drivers/hwmon/pmbus/mpq7932.c b/drivers/hwmon/pmbus/mpq7932.c
index c1e2d0cb2fd0..8f10e37a7a76 100644
--- a/drivers/hwmon/pmbus/mpq7932.c
+++ b/drivers/hwmon/pmbus/mpq7932.c
@@ -51,8 +51,8 @@ static const struct regulator_desc mpq7932_regulators_desc[] = {
};
static const struct regulator_desc mpq7932_regulators_desc_one[] = {
- PMBUS_REGULATOR_STEP_ONE("buck", MPQ7932_N_VOLTAGES,
- MPQ7932_UV_STEP, MPQ7932_BUCK_UV_MIN),
+ PMBUS_REGULATOR_STEP_ONE_NODE("buck", MPQ7932_N_VOLTAGES,
+ MPQ7932_UV_STEP, MPQ7932_BUCK_UV_MIN),
};
#endif
diff --git a/drivers/hwmon/pmbus/mpq8785.c b/drivers/hwmon/pmbus/mpq8785.c
index 331c274ca892..1f56aaf4dde8 100644
--- a/drivers/hwmon/pmbus/mpq8785.c
+++ b/drivers/hwmon/pmbus/mpq8785.c
@@ -4,10 +4,23 @@
*/
#include <linux/i2c.h>
+#include <linux/bitops.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/of_device.h>
#include "pmbus.h"
+#define MPM82504_READ_TEMPERATURE_1_SIGN_POS 9
+
+enum chips { mpm3695, mpm3695_25, mpm82504, mpq8785 };
+
+static u16 voltage_scale_loop_max_val[] = {
+ [mpm3695] = GENMASK(9, 0),
+ [mpm3695_25] = GENMASK(11, 0),
+ [mpm82504] = GENMASK(9, 0),
+ [mpq8785] = GENMASK(10, 0),
+};
+
static int mpq8785_identify(struct i2c_client *client,
struct pmbus_driver_info *info)
{
@@ -34,6 +47,20 @@ static int mpq8785_identify(struct i2c_client *client,
return 0;
};
+static int mpm82504_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
+{
+ int ret;
+
+ ret = pmbus_read_word_data(client, page, phase, reg);
+
+ if (ret < 0 || reg != PMBUS_READ_TEMPERATURE_1)
+ return ret;
+
+ /* Fix PMBUS_READ_TEMPERATURE_1 signedness */
+ return sign_extend32(ret, MPM82504_READ_TEMPERATURE_1_SIGN_POS) & 0xffff;
+}
+
static struct pmbus_driver_info mpq8785_info = {
.pages = 1,
.format[PSC_VOLTAGE_IN] = direct,
@@ -53,26 +80,74 @@ static struct pmbus_driver_info mpq8785_info = {
PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
- .identify = mpq8785_identify,
-};
-
-static int mpq8785_probe(struct i2c_client *client)
-{
- return pmbus_do_probe(client, &mpq8785_info);
};
static const struct i2c_device_id mpq8785_id[] = {
- { "mpq8785" },
+ { "mpm3695", mpm3695 },
+ { "mpm3695-25", mpm3695_25 },
+ { "mpm82504", mpm82504 },
+ { "mpq8785", mpq8785 },
{ },
};
MODULE_DEVICE_TABLE(i2c, mpq8785_id);
static const struct of_device_id __maybe_unused mpq8785_of_match[] = {
- { .compatible = "mps,mpq8785" },
+ { .compatible = "mps,mpm3695", .data = (void *)mpm3695 },
+ { .compatible = "mps,mpm3695-25", .data = (void *)mpm3695_25 },
+ { .compatible = "mps,mpm82504", .data = (void *)mpm82504 },
+ { .compatible = "mps,mpq8785", .data = (void *)mpq8785 },
{}
};
MODULE_DEVICE_TABLE(of, mpq8785_of_match);
+static int mpq8785_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct pmbus_driver_info *info;
+ enum chips chip_id;
+ u32 voltage_scale;
+ int ret;
+
+ info = devm_kmemdup(dev, &mpq8785_info, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ if (dev->of_node)
+ chip_id = (kernel_ulong_t)of_device_get_match_data(dev);
+ else
+ chip_id = (kernel_ulong_t)i2c_get_match_data(client);
+
+ switch (chip_id) {
+ case mpm3695:
+ case mpm3695_25:
+ case mpm82504:
+ info->format[PSC_VOLTAGE_OUT] = direct;
+ info->m[PSC_VOLTAGE_OUT] = 8;
+ info->b[PSC_VOLTAGE_OUT] = 0;
+ info->R[PSC_VOLTAGE_OUT] = 2;
+ info->read_word_data = mpm82504_read_word_data;
+ break;
+ case mpq8785:
+ info->identify = mpq8785_identify;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ if (!device_property_read_u32(dev, "mps,vout-fb-divider-ratio-permille",
+ &voltage_scale)) {
+ if (voltage_scale > voltage_scale_loop_max_val[chip_id])
+ return -EINVAL;
+
+ ret = i2c_smbus_write_word_data(client, PMBUS_VOUT_SCALE_LOOP,
+ voltage_scale);
+ if (ret)
+ return ret;
+ }
+
+ return pmbus_do_probe(client, info);
+}
+
static struct i2c_driver mpq8785_driver = {
.driver = {
.name = "mpq8785",
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index ddb19c9726d6..d2e9bfb5320f 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -482,6 +482,7 @@ struct pmbus_driver_info {
*/
int access_delay; /* in microseconds */
int write_delay; /* in microseconds */
+ int page_change_delay; /* in microseconds */
};
/* Regulator ops */
@@ -508,11 +509,11 @@ int pmbus_regulator_init_cb(struct regulator_dev *rdev,
#define PMBUS_REGULATOR(_name, _id) PMBUS_REGULATOR_STEP(_name, _id, 0, 0, 0)
-#define PMBUS_REGULATOR_STEP_ONE(_name, _voltages, _step, _min_uV) \
+#define __PMBUS_REGULATOR_STEP_ONE(_name, _node, _voltages, _step, _min_uV) \
{ \
.name = (_name), \
.of_match = of_match_ptr(_name), \
- .regulators_node = of_match_ptr("regulators"), \
+ .regulators_node = of_match_ptr(_node), \
.ops = &pmbus_regulator_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
@@ -522,7 +523,19 @@ int pmbus_regulator_init_cb(struct regulator_dev *rdev,
.init_cb = pmbus_regulator_init_cb, \
}
-#define PMBUS_REGULATOR_ONE(_name) PMBUS_REGULATOR_STEP_ONE(_name, 0, 0, 0)
+/*
+ * _NODE macros are defined for historic reasons and MUST NOT be used in new
+ * drivers.
+ */
+#define PMBUS_REGULATOR_STEP_ONE_NODE(_name, _voltages, _step, _min_uV) \
+ __PMBUS_REGULATOR_STEP_ONE(_name, "regulators", _voltages, _step, _min_uV)
+
+#define PMBUS_REGULATOR_ONE_NODE(_name) PMBUS_REGULATOR_STEP_ONE_NODE(_name, 0, 0, 0)
+
+#define PMBUS_REGULATOR_STEP_ONE(_name, _voltages, _step, _min_uV) \
+ __PMBUS_REGULATOR_STEP_ONE(_name, NULL, _voltages, _step, _min_uV)
+
+#define PMBUS_REGULATOR_ONE(_name) PMBUS_REGULATOR_STEP_ONE(_name, 0, 0, 0)
/* Function declarations */
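
After this split, a new PMBus driver should declare its regulator with the plain macro, which matches the OF node directly, while the deprecated _NODE variants keep looking under a "regulators" subnode for existing bindings. A minimal usage sketch (array names hypothetical):

/* New drivers: match "vout" directly against the device node. */
static const struct regulator_desc foo_reg_desc[] = {
	PMBUS_REGULATOR_ONE("vout"),
};

/* Legacy bindings only: match under a "regulators" subnode. */
static const struct regulator_desc bar_reg_desc[] = {
	PMBUS_REGULATOR_ONE_NODE("vout"),
};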
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index cfeba2e4c5c3..be6d05def115 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -32,6 +32,13 @@
#define PMBUS_ATTR_ALLOC_SIZE 32
#define PMBUS_NAME_SIZE 24
+/*
+ * Operation type flags used to pick the delay between successive
+ * PMBus operations.
+ */
+#define PMBUS_OP_WRITE BIT(0)
+#define PMBUS_OP_PAGE_CHANGE BIT(1)
+
static int wp = -1;
module_param(wp, int, 0444);
@@ -113,8 +120,8 @@ struct pmbus_data {
int vout_low[PMBUS_PAGES]; /* voltage low margin */
int vout_high[PMBUS_PAGES]; /* voltage high margin */
- ktime_t write_time; /* Last SMBUS write timestamp */
- ktime_t access_time; /* Last SMBUS access timestamp */
+
+ ktime_t next_access_backoff; /* Wait until at least this time */
};
struct pmbus_debugfs_entry {
@@ -169,32 +176,26 @@ EXPORT_SYMBOL_NS_GPL(pmbus_set_update, "PMBUS");
static void pmbus_wait(struct i2c_client *client)
{
struct pmbus_data *data = i2c_get_clientdata(client);
- const struct pmbus_driver_info *info = data->info;
- s64 delta;
+ s64 delay = ktime_us_delta(data->next_access_backoff, ktime_get());
- if (info->access_delay) {
- delta = ktime_us_delta(ktime_get(), data->access_time);
-
- if (delta < info->access_delay)
- fsleep(info->access_delay - delta);
- } else if (info->write_delay) {
- delta = ktime_us_delta(ktime_get(), data->write_time);
-
- if (delta < info->write_delay)
- fsleep(info->write_delay - delta);
- }
+ if (delay > 0)
+ fsleep(delay);
}
-/* Sets the last accessed timestamp for pmbus_wait */
-static void pmbus_update_ts(struct i2c_client *client, bool write_op)
+/* Records the earliest allowed time of the next access for pmbus_wait */
+static void pmbus_update_ts(struct i2c_client *client, int op)
{
struct pmbus_data *data = i2c_get_clientdata(client);
const struct pmbus_driver_info *info = data->info;
+ int delay = info->access_delay;
+
+ if (op & PMBUS_OP_WRITE)
+ delay = max(delay, info->write_delay);
+ if (op & PMBUS_OP_PAGE_CHANGE)
+ delay = max(delay, info->page_change_delay);
- if (info->access_delay)
- data->access_time = ktime_get();
- else if (info->write_delay && write_op)
- data->write_time = ktime_get();
+ if (delay > 0)
+ data->next_access_backoff = ktime_add_us(ktime_get(), delay);
}
int pmbus_set_page(struct i2c_client *client, int page, int phase)
@@ -209,13 +210,13 @@ int pmbus_set_page(struct i2c_client *client, int page, int phase)
data->info->pages > 1 && page != data->currpage) {
pmbus_wait(client);
rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
- pmbus_update_ts(client, true);
+ pmbus_update_ts(client, PMBUS_OP_WRITE | PMBUS_OP_PAGE_CHANGE);
if (rv < 0)
return rv;
pmbus_wait(client);
rv = i2c_smbus_read_byte_data(client, PMBUS_PAGE);
- pmbus_update_ts(client, false);
+ pmbus_update_ts(client, 0);
if (rv < 0)
return rv;
@@ -229,7 +230,7 @@ int pmbus_set_page(struct i2c_client *client, int page, int phase)
pmbus_wait(client);
rv = i2c_smbus_write_byte_data(client, PMBUS_PHASE,
phase);
- pmbus_update_ts(client, true);
+ pmbus_update_ts(client, PMBUS_OP_WRITE);
if (rv)
return rv;
}
@@ -249,7 +250,7 @@ int pmbus_write_byte(struct i2c_client *client, int page, u8 value)
pmbus_wait(client);
rv = i2c_smbus_write_byte(client, value);
- pmbus_update_ts(client, true);
+ pmbus_update_ts(client, PMBUS_OP_WRITE);
return rv;
}
@@ -284,7 +285,7 @@ int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg,
pmbus_wait(client);
rv = i2c_smbus_write_word_data(client, reg, word);
- pmbus_update_ts(client, true);
+ pmbus_update_ts(client, PMBUS_OP_WRITE);
return rv;
}
@@ -405,7 +406,7 @@ int pmbus_read_word_data(struct i2c_client *client, int page, int phase, u8 reg)
pmbus_wait(client);
rv = i2c_smbus_read_word_data(client, reg);
- pmbus_update_ts(client, false);
+ pmbus_update_ts(client, 0);
return rv;
}
@@ -468,7 +469,7 @@ int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg)
pmbus_wait(client);
rv = i2c_smbus_read_byte_data(client, reg);
- pmbus_update_ts(client, false);
+ pmbus_update_ts(client, 0);
return rv;
}
@@ -484,7 +485,7 @@ int pmbus_write_byte_data(struct i2c_client *client, int page, u8 reg, u8 value)
pmbus_wait(client);
rv = i2c_smbus_write_byte_data(client, reg, value);
- pmbus_update_ts(client, true);
+ pmbus_update_ts(client, PMBUS_OP_WRITE);
return rv;
}
@@ -520,7 +521,7 @@ static int pmbus_read_block_data(struct i2c_client *client, int page, u8 reg,
pmbus_wait(client);
rv = i2c_smbus_read_block_data(client, reg, data_buf);
- pmbus_update_ts(client, false);
+ pmbus_update_ts(client, 0);
return rv;
}
@@ -2524,7 +2525,7 @@ static int pmbus_read_coefficients(struct i2c_client *client,
rv = i2c_smbus_xfer(client->adapter, client->addr, client->flags,
I2C_SMBUS_WRITE, PMBUS_COEFFICIENTS,
I2C_SMBUS_BLOCK_PROC_CALL, &data);
- pmbus_update_ts(client, true);
+ pmbus_update_ts(client, PMBUS_OP_WRITE);
if (rv < 0)
return rv;
@@ -2728,7 +2729,7 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
if (!(data->flags & PMBUS_NO_CAPABILITY)) {
pmbus_wait(client);
ret = i2c_smbus_read_byte_data(client, PMBUS_CAPABILITY);
- pmbus_update_ts(client, false);
+ pmbus_update_ts(client, 0);
if (ret >= 0 && (ret & PB_CAPABILITY_ERROR_CHECK)) {
if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_PEC))
@@ -2744,13 +2745,13 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
data->read_status = pmbus_read_status_word;
pmbus_wait(client);
ret = i2c_smbus_read_word_data(client, PMBUS_STATUS_WORD);
- pmbus_update_ts(client, false);
+ pmbus_update_ts(client, 0);
if (ret < 0 || ret == 0xffff) {
data->read_status = pmbus_read_status_byte;
pmbus_wait(client);
ret = i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE);
- pmbus_update_ts(client, false);
+ pmbus_update_ts(client, 0);
if (ret < 0 || ret == 0xff) {
dev_err(dev, "PMBus status register not found\n");
diff --git a/drivers/hwmon/pmbus/tda38640.c b/drivers/hwmon/pmbus/tda38640.c
index 07fe58c24485..d902d39f49f4 100644
--- a/drivers/hwmon/pmbus/tda38640.c
+++ b/drivers/hwmon/pmbus/tda38640.c
@@ -15,7 +15,7 @@
#include "pmbus.h"
static const struct regulator_desc __maybe_unused tda38640_reg_desc[] = {
- PMBUS_REGULATOR_ONE("vout"),
+ PMBUS_REGULATOR_ONE_NODE("vout"),
};
struct tda38640_data {
diff --git a/drivers/hwmon/pmbus/tps25990.c b/drivers/hwmon/pmbus/tps25990.c
index 0d2655e69549..c13edd7e1abf 100644
--- a/drivers/hwmon/pmbus/tps25990.c
+++ b/drivers/hwmon/pmbus/tps25990.c
@@ -333,7 +333,7 @@ static int tps25990_write_byte_data(struct i2c_client *client,
#if IS_ENABLED(CONFIG_SENSORS_TPS25990_REGULATOR)
static const struct regulator_desc tps25990_reg_desc[] = {
- PMBUS_REGULATOR_ONE("vout"),
+ PMBUS_REGULATOR_ONE_NODE("vout"),
};
#endif
diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index 9b0eadc81a2e..2bc8cccb01fd 100644
--- a/drivers/hwmon/pmbus/ucd9000.c
+++ b/drivers/hwmon/pmbus/ucd9000.c
@@ -212,8 +212,8 @@ static int ucd9000_gpio_get(struct gpio_chip *gc, unsigned int offset)
return !!(ret & UCD9000_GPIO_CONFIG_STATUS);
}
-static void ucd9000_gpio_set(struct gpio_chip *gc, unsigned int offset,
- int value)
+static int ucd9000_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
struct i2c_client *client = gpiochip_get_data(gc);
int ret;
@@ -222,17 +222,17 @@ static void ucd9000_gpio_set(struct gpio_chip *gc, unsigned int offset,
if (ret < 0) {
dev_dbg(&client->dev, "failed to read GPIO %d config: %d\n",
offset, ret);
- return;
+ return ret;
}
if (value) {
if (ret & UCD9000_GPIO_CONFIG_STATUS)
- return;
+ return 0;
ret |= UCD9000_GPIO_CONFIG_STATUS;
} else {
if (!(ret & UCD9000_GPIO_CONFIG_STATUS))
- return;
+ return 0;
ret &= ~UCD9000_GPIO_CONFIG_STATUS;
}
@@ -244,7 +244,7 @@ static void ucd9000_gpio_set(struct gpio_chip *gc, unsigned int offset,
if (ret < 0) {
dev_dbg(&client->dev, "Failed to write GPIO %d config: %d\n",
offset, ret);
- return;
+ return ret;
}
ret &= ~UCD9000_GPIO_CONFIG_ENABLE;
@@ -253,6 +253,8 @@ static void ucd9000_gpio_set(struct gpio_chip *gc, unsigned int offset,
if (ret < 0)
dev_dbg(&client->dev, "Failed to write GPIO %d config: %d\n",
offset, ret);
+
+ return ret;
}
static int ucd9000_gpio_get_direction(struct gpio_chip *gc,
@@ -362,7 +364,7 @@ static void ucd9000_probe_gpio(struct i2c_client *client,
data->gpio.direction_input = ucd9000_gpio_direction_input;
data->gpio.direction_output = ucd9000_gpio_direction_output;
data->gpio.get = ucd9000_gpio_get;
- data->gpio.set = ucd9000_gpio_set;
+ data->gpio.set_rv = ucd9000_gpio_set;
data->gpio.can_sleep = true;
data->gpio.base = -1;
data->gpio.parent = &client->dev;
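
The conversion above tracks the GPIO core's move from the void set() callback to the int-returning set_rv(), so I2C failures are no longer silently dropped. A minimal sketch of the new shape (all foo_* names hypothetical, assuming a regmap-backed expander):

static int foo_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
	struct foo_chip *chip = gpiochip_get_data(gc);

	/* The error now propagates to the GPIO core instead of vanishing. */
	return regmap_update_bits(chip->regmap, FOO_GPIO_REG(offset),
				  FOO_GPIO_STATUS,
				  value ? FOO_GPIO_STATUS : 0);
}

/* Registration: assign set_rv instead of the legacy set callback. */
chip->gc.set_rv = foo_gpio_set;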
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index d506a5e7e033..2df294793f6e 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -620,8 +620,8 @@ static int pwm_fan_probe(struct platform_device *pdev)
if (tach->irq == -EPROBE_DEFER)
return tach->irq;
if (tach->irq > 0) {
- ret = devm_request_irq(dev, tach->irq, pulse_handler, 0,
- pdev->name, tach);
+ ret = devm_request_irq(dev, tach->irq, pulse_handler,
+ IRQF_NO_THREAD, pdev->name, tach);
if (ret) {
dev_err(dev,
"Failed to request interrupt: %d\n",
diff --git a/drivers/hwmon/qnap-mcu-hwmon.c b/drivers/hwmon/qnap-mcu-hwmon.c
index 29057514739c..e86e64c4d391 100644
--- a/drivers/hwmon/qnap-mcu-hwmon.c
+++ b/drivers/hwmon/qnap-mcu-hwmon.c
@@ -6,7 +6,6 @@
* Copyright (C) 2024 Heiko Stuebner <heiko@sntech.de>
*/
-#include <linux/fwnode.h>
#include <linux/hwmon.h>
#include <linux/mfd/qnap-mcu.h>
#include <linux/module.h>
diff --git a/drivers/hwmon/spd5118.c b/drivers/hwmon/spd5118.c
index 358152868d96..5da44571b6a0 100644
--- a/drivers/hwmon/spd5118.c
+++ b/drivers/hwmon/spd5118.c
@@ -66,6 +66,9 @@ static const unsigned short normal_i2c[] = {
#define SPD5118_EEPROM_BASE 0x80
#define SPD5118_EEPROM_SIZE (SPD5118_PAGE_SIZE * SPD5118_NUM_PAGES)
+#define PAGE_ADDR0(page) (((page) & BIT(0)) << 6)
+#define PAGE_ADDR1_4(page) (((page) & GENMASK(4, 1)) >> 1)
+
/* Temperature unit in millicelsius */
#define SPD5118_TEMP_UNIT (MILLIDEGREE_PER_DEGREE / 4)
/* Representable temperature range in millicelsius */
@@ -75,6 +78,7 @@ static const unsigned short normal_i2c[] = {
struct spd5118_data {
struct regmap *regmap;
struct mutex nvmem_lock;
+ bool is_16bit;
};
/* hwmon */
@@ -305,51 +309,6 @@ static bool spd5118_vendor_valid(u8 bank, u8 id)
return id && id != 0x7f;
}
-/* Return 0 if detection is successful, -ENODEV otherwise */
-static int spd5118_detect(struct i2c_client *client, struct i2c_board_info *info)
-{
- struct i2c_adapter *adapter = client->adapter;
- int regval;
-
- if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
- I2C_FUNC_SMBUS_WORD_DATA))
- return -ENODEV;
-
- regval = i2c_smbus_read_word_swapped(client, SPD5118_REG_TYPE);
- if (regval != 0x5118)
- return -ENODEV;
-
- regval = i2c_smbus_read_word_data(client, SPD5118_REG_VENDOR);
- if (regval < 0 || !spd5118_vendor_valid(regval & 0xff, regval >> 8))
- return -ENODEV;
-
- regval = i2c_smbus_read_byte_data(client, SPD5118_REG_CAPABILITY);
- if (regval < 0)
- return -ENODEV;
- if (!(regval & SPD5118_CAP_TS_SUPPORT) || (regval & 0xfc))
- return -ENODEV;
-
- regval = i2c_smbus_read_byte_data(client, SPD5118_REG_TEMP_CLR);
- if (regval)
- return -ENODEV;
- regval = i2c_smbus_read_byte_data(client, SPD5118_REG_ERROR_CLR);
- if (regval)
- return -ENODEV;
-
- regval = i2c_smbus_read_byte_data(client, SPD5118_REG_REVISION);
- if (regval < 0 || (regval & 0xc1))
- return -ENODEV;
-
- regval = i2c_smbus_read_byte_data(client, SPD5118_REG_TEMP_CONFIG);
- if (regval < 0)
- return -ENODEV;
- if (regval & ~SPD5118_TS_DISABLE)
- return -ENODEV;
-
- strscpy(info->type, "spd5118", I2C_NAME_SIZE);
- return 0;
-}
-
static const struct hwmon_channel_info *spd5118_info[] = {
HWMON_CHANNEL_INFO(chip,
HWMON_C_REGISTER_TZ),
@@ -376,11 +335,12 @@ static const struct hwmon_chip_info spd5118_chip_info = {
/* nvmem */
-static ssize_t spd5118_nvmem_read_page(struct regmap *regmap, char *buf,
+static ssize_t spd5118_nvmem_read_page(struct spd5118_data *data, char *buf,
unsigned int offset, size_t count)
{
- int addr = (offset >> SPD5118_PAGE_SHIFT) * 0x100 + SPD5118_EEPROM_BASE;
- int err;
+ int page = offset >> SPD5118_PAGE_SHIFT;
+ struct regmap *regmap = data->regmap;
+ int err, addr;
offset &= SPD5118_PAGE_MASK;
@@ -388,6 +348,12 @@ static ssize_t spd5118_nvmem_read_page(struct regmap *regmap, char *buf,
if (offset + count > SPD5118_PAGE_SIZE)
count = SPD5118_PAGE_SIZE - offset;
+ if (data->is_16bit) {
+ addr = SPD5118_EEPROM_BASE | PAGE_ADDR0(page) |
+ (PAGE_ADDR1_4(page) << 8);
+ } else {
+ addr = page * 0x100 + SPD5118_EEPROM_BASE;
+ }
err = regmap_bulk_read(regmap, addr + offset, buf, count);
if (err)
return err;
@@ -410,7 +376,7 @@ static int spd5118_nvmem_read(void *priv, unsigned int off, void *val, size_t co
mutex_lock(&data->nvmem_lock);
while (count) {
- ret = spd5118_nvmem_read_page(data->regmap, buf, off, count);
+ ret = spd5118_nvmem_read_page(data, buf, off, count);
if (ret < 0) {
mutex_unlock(&data->nvmem_lock);
return ret;
@@ -483,7 +449,7 @@ static bool spd5118_volatile_reg(struct device *dev, unsigned int reg)
}
}
-static const struct regmap_range_cfg spd5118_regmap_range_cfg[] = {
+static const struct regmap_range_cfg spd5118_i2c_regmap_range_cfg[] = {
{
.selector_reg = SPD5118_REG_I2C_LEGACY_MODE,
.selector_mask = SPD5118_LEGACY_PAGE_MASK,
@@ -495,7 +461,7 @@ static const struct regmap_range_cfg spd5118_regmap_range_cfg[] = {
},
};
-static const struct regmap_config spd5118_regmap_config = {
+static const struct regmap_config spd5118_regmap8_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = 0x7ff,
@@ -503,89 +469,76 @@ static const struct regmap_config spd5118_regmap_config = {
.volatile_reg = spd5118_volatile_reg,
.cache_type = REGCACHE_MAPLE,
- .ranges = spd5118_regmap_range_cfg,
- .num_ranges = ARRAY_SIZE(spd5118_regmap_range_cfg),
+ .ranges = spd5118_i2c_regmap_range_cfg,
+ .num_ranges = ARRAY_SIZE(spd5118_i2c_regmap_range_cfg),
};
-static int spd5118_init(struct i2c_client *client)
-{
- struct i2c_adapter *adapter = client->adapter;
- int err, regval, mode;
-
- if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
- I2C_FUNC_SMBUS_WORD_DATA))
- return -ENODEV;
+static const struct regmap_config spd5118_regmap16_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .max_register = 0x7ff,
+ .writeable_reg = spd5118_writeable_reg,
+ .volatile_reg = spd5118_volatile_reg,
+ .cache_type = REGCACHE_MAPLE,
+};
- regval = i2c_smbus_read_word_swapped(client, SPD5118_REG_TYPE);
- if (regval < 0 || (regval && regval != 0x5118))
- return -ENODEV;
+static int spd5118_suspend(struct device *dev)
+{
+ struct spd5118_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
+ u32 regval;
+ int err;
/*
- * If the device type registers return 0, it is possible that the chip
- * has a non-zero page selected and takes the specification literally,
- * i.e. disables access to volatile registers besides the page register
- * if the page is not 0. Try to identify such chips.
+ * Make sure the configuration register in the regmap cache is current
+ * before bypassing it.
*/
- if (!regval) {
- /* Vendor ID registers must also be 0 */
- regval = i2c_smbus_read_word_data(client, SPD5118_REG_VENDOR);
- if (regval)
- return -ENODEV;
-
- /* The selected page in MR11 must not be 0 */
- mode = i2c_smbus_read_byte_data(client, SPD5118_REG_I2C_LEGACY_MODE);
- if (mode < 0 || (mode & ~SPD5118_LEGACY_MODE_MASK) ||
- !(mode & SPD5118_LEGACY_PAGE_MASK))
- return -ENODEV;
+ err = regmap_read(regmap, SPD5118_REG_TEMP_CONFIG, &regval);
+ if (err < 0)
+ return err;
- err = i2c_smbus_write_byte_data(client, SPD5118_REG_I2C_LEGACY_MODE,
- mode & SPD5118_LEGACY_MODE_ADDR);
- if (err)
- return -ENODEV;
+ regcache_cache_bypass(regmap, true);
+ regmap_update_bits(regmap, SPD5118_REG_TEMP_CONFIG, SPD5118_TS_DISABLE,
+ SPD5118_TS_DISABLE);
+ regcache_cache_bypass(regmap, false);
- /*
- * If the device type registers are still bad after selecting
- * page 0, this is not a SPD5118 device. Restore original
- * legacy mode register value and abort.
- */
- regval = i2c_smbus_read_word_swapped(client, SPD5118_REG_TYPE);
- if (regval != 0x5118) {
- i2c_smbus_write_byte_data(client, SPD5118_REG_I2C_LEGACY_MODE, mode);
- return -ENODEV;
- }
- }
+ regcache_cache_only(regmap, true);
+ regcache_mark_dirty(regmap);
- /* We are reasonably sure that this is really a SPD5118 hub controller */
return 0;
}
-static int spd5118_probe(struct i2c_client *client)
+static int spd5118_resume(struct device *dev)
{
- struct device *dev = &client->dev;
- unsigned int regval, revision, vendor, bank;
+ struct spd5118_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
+
+ regcache_cache_only(regmap, false);
+ return regcache_sync(regmap);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(spd5118_pm_ops, spd5118_suspend, spd5118_resume);
+
+static int spd5118_common_probe(struct device *dev, struct regmap *regmap,
+ bool is_16bit)
+{
+ unsigned int capability, revision, vendor, bank;
struct spd5118_data *data;
struct device *hwmon_dev;
- struct regmap *regmap;
int err;
- err = spd5118_init(client);
- if (err)
- return err;
-
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- regmap = devm_regmap_init_i2c(client, &spd5118_regmap_config);
- if (IS_ERR(regmap))
- return dev_err_probe(dev, PTR_ERR(regmap), "regmap init failed\n");
-
- err = regmap_read(regmap, SPD5118_REG_CAPABILITY, &regval);
+ err = regmap_read(regmap, SPD5118_REG_CAPABILITY, &capability);
if (err)
return err;
- if (!(regval & SPD5118_CAP_TS_SUPPORT))
+ if (!(capability & SPD5118_CAP_TS_SUPPORT))
return -ENODEV;
+ data->is_16bit = is_16bit;
+
err = regmap_read(regmap, SPD5118_REG_REVISION, &revision);
if (err)
return err;
@@ -627,48 +580,176 @@ static int spd5118_probe(struct i2c_client *client)
return 0;
}
-static int spd5118_suspend(struct device *dev)
+/* I2C */
+
+/* Return 0 if detection is successful, -ENODEV otherwise */
+static int spd5118_detect(struct i2c_client *client, struct i2c_board_info *info)
{
- struct spd5118_data *data = dev_get_drvdata(dev);
- struct regmap *regmap = data->regmap;
- u32 regval;
- int err;
+ struct i2c_adapter *adapter = client->adapter;
+ int regval;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ regval = i2c_smbus_read_word_swapped(client, SPD5118_REG_TYPE);
+ if (regval != 0x5118)
+ return -ENODEV;
+
+ regval = i2c_smbus_read_word_data(client, SPD5118_REG_VENDOR);
+ if (regval < 0 || !spd5118_vendor_valid(regval & 0xff, regval >> 8))
+ return -ENODEV;
+
+ regval = i2c_smbus_read_byte_data(client, SPD5118_REG_CAPABILITY);
+ if (regval < 0)
+ return -ENODEV;
+ if (!(regval & SPD5118_CAP_TS_SUPPORT) || (regval & 0xfc))
+ return -ENODEV;
+
+ regval = i2c_smbus_read_byte_data(client, SPD5118_REG_TEMP_CLR);
+ if (regval)
+ return -ENODEV;
+ regval = i2c_smbus_read_byte_data(client, SPD5118_REG_ERROR_CLR);
+ if (regval)
+ return -ENODEV;
+
+ regval = i2c_smbus_read_byte_data(client, SPD5118_REG_REVISION);
+ if (regval < 0 || (regval & 0xc1))
+ return -ENODEV;
+
+ regval = i2c_smbus_read_byte_data(client, SPD5118_REG_TEMP_CONFIG);
+ if (regval < 0)
+ return -ENODEV;
+ if (regval & ~SPD5118_TS_DISABLE)
+ return -ENODEV;
+
+ strscpy(info->type, "spd5118", I2C_NAME_SIZE);
+ return 0;
+}
+
+static int spd5118_i2c_init(struct i2c_client *client)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ int err, regval, mode;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ regval = i2c_smbus_read_word_swapped(client, SPD5118_REG_TYPE);
+ if (regval < 0 || (regval && regval != 0x5118))
+ return -ENODEV;
/*
- * Make sure the configuration register in the regmap cache is current
- * before bypassing it.
+ * If the device type registers return 0, it is possible that the chip
+ * has a non-zero page selected and takes the specification literally,
+ * i.e. disables access to volatile registers besides the page register
+ * if the page is not 0. The Renesas/ITD SPD5118 Hub Controller is known
+ * to show this behavior. Try to identify such chips.
*/
- err = regmap_read(regmap, SPD5118_REG_TEMP_CONFIG, &regval);
- if (err < 0)
- return err;
+ if (!regval) {
+ /* Vendor ID registers must also be 0 */
+ regval = i2c_smbus_read_word_data(client, SPD5118_REG_VENDOR);
+ if (regval)
+ return -ENODEV;
- regcache_cache_bypass(regmap, true);
- regmap_update_bits(regmap, SPD5118_REG_TEMP_CONFIG, SPD5118_TS_DISABLE,
- SPD5118_TS_DISABLE);
- regcache_cache_bypass(regmap, false);
+ /* The selected page in MR11 must not be 0 */
+ mode = i2c_smbus_read_byte_data(client, SPD5118_REG_I2C_LEGACY_MODE);
+ if (mode < 0 || (mode & ~SPD5118_LEGACY_MODE_MASK) ||
+ !(mode & SPD5118_LEGACY_PAGE_MASK))
+ return -ENODEV;
- regcache_cache_only(regmap, true);
- regcache_mark_dirty(regmap);
+ err = i2c_smbus_write_byte_data(client, SPD5118_REG_I2C_LEGACY_MODE,
+ mode & SPD5118_LEGACY_MODE_ADDR);
+ if (err)
+ return -ENODEV;
+ /*
+ * If the device type registers are still bad after selecting
+ * page 0, this is not a SPD5118 device. Restore original
+ * legacy mode register value and abort.
+ */
+ regval = i2c_smbus_read_word_swapped(client, SPD5118_REG_TYPE);
+ if (regval != 0x5118) {
+ i2c_smbus_write_byte_data(client, SPD5118_REG_I2C_LEGACY_MODE, mode);
+ return -ENODEV;
+ }
+ }
+
+ /* We are reasonably sure that this is really a SPD5118 hub controller */
return 0;
}
-static int spd5118_resume(struct device *dev)
+/*
+ * 16-bit addressing note:
+ *
+ * If I2C_FUNC_I2C is not supported by an I2C adapter driver, regmap uses
+ * SMBus operations as an alternative. To simulate a read operation with a 16-bit
+ * address, it writes the address using i2c_smbus_write_byte_data(), followed
+ * by one or more calls to i2c_smbus_read_byte() to read the data.
+ * Per spd5118 standard, a read operation after writing the address must start
+ * with <Sr> (Repeat Start). However, an SMBus read byte operation starts with
+ * <S> (Start). This resets the register address in the spd5118 chip. As a result,
+ * i2c_smbus_read_byte() always returns data from register address 0x00.
+ *
+ * A working alternative to access chips with 16-bit register addresses in the
+ * absence of I2C_FUNC_I2C support is not known.
+ *
+ * For this reason, 16-bit addressing can only be supported with I2C if the
+ * adapter supports I2C_FUNC_I2C.
+ *
+ * For I2C, the addressing mode selected by the BIOS must not be changed.
+ * Experiments show that at least some PC BIOS versions will not change the
+ * addressing mode on a soft reboot and end up in setup, claiming that some
+ * configuration change happened. This will happen again after a power cycle,
+ * which does reset the addressing mode. To prevent this from happening,
+ * detect if 16-bit addressing is enabled and always use the currently
+ * configured addressing mode.
+ */
+
+static int spd5118_i2c_probe(struct i2c_client *client)
{
- struct spd5118_data *data = dev_get_drvdata(dev);
- struct regmap *regmap = data->regmap;
+ const struct regmap_config *config;
+ struct device *dev = &client->dev;
+ struct regmap *regmap;
+ int err, mode;
+ bool is_16bit;
- regcache_cache_only(regmap, false);
- return regcache_sync(regmap);
-}
+ err = spd5118_i2c_init(client);
+ if (err)
+ return err;
-static DEFINE_SIMPLE_DEV_PM_OPS(spd5118_pm_ops, spd5118_suspend, spd5118_resume);
+ mode = i2c_smbus_read_byte_data(client, SPD5118_REG_I2C_LEGACY_MODE);
+ if (mode < 0)
+ return mode;
+
+ is_16bit = mode & SPD5118_LEGACY_MODE_ADDR;
+ if (is_16bit) {
+ /*
+ * See 16-bit addressing note above explaining why it is
+ * necessary to check for I2C_FUNC_I2C support here.
+ */
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(dev, "Adapter does not support 16-bit register addresses\n");
+ return -ENODEV;
+ }
+ config = &spd5118_regmap16_config;
+ } else {
+ config = &spd5118_regmap8_config;
+ }
+
+ regmap = devm_regmap_init_i2c(client, config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap), "regmap init failed\n");
+
+ return spd5118_common_probe(dev, regmap, is_16bit);
+}
-static const struct i2c_device_id spd5118_id[] = {
+static const struct i2c_device_id spd5118_i2c_id[] = {
{ "spd5118" },
{ }
};
-MODULE_DEVICE_TABLE(i2c, spd5118_id);
+MODULE_DEVICE_TABLE(i2c, spd5118_i2c_id);
static const struct of_device_id spd5118_of_ids[] = {
{ .compatible = "jedec,spd5118", },
@@ -676,20 +757,20 @@ static const struct of_device_id spd5118_of_ids[] = {
};
MODULE_DEVICE_TABLE(of, spd5118_of_ids);
-static struct i2c_driver spd5118_driver = {
+static struct i2c_driver spd5118_i2c_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "spd5118",
.of_match_table = spd5118_of_ids,
.pm = pm_sleep_ptr(&spd5118_pm_ops),
},
- .probe = spd5118_probe,
- .id_table = spd5118_id,
+ .probe = spd5118_i2c_probe,
+ .id_table = spd5118_i2c_id,
.detect = IS_ENABLED(CONFIG_SENSORS_SPD5118_DETECT) ? spd5118_detect : NULL,
.address_list = IS_ENABLED(CONFIG_SENSORS_SPD5118_DETECT) ? normal_i2c : NULL,
};
-module_i2c_driver(spd5118_driver);
+module_i2c_driver(spd5118_i2c_driver);
MODULE_AUTHOR("René Rebe <rene@exactcode.de>");
MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 8af44a33055f..a02daa496c9c 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -16,6 +16,7 @@
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <linux/of.h>
#define DRIVER_NAME "tmp102"
@@ -204,6 +205,10 @@ static int tmp102_probe(struct i2c_client *client)
return -ENODEV;
}
+ err = devm_regulator_get_enable_optional(dev, "vcc");
+ if (err < 0 && err != -ENODEV)
+ return dev_err_probe(dev, err, "Failed to enable regulator\n");
+
tmp102 = devm_kzalloc(dev, sizeof(*tmp102), GFP_KERNEL);
if (!tmp102)
return -ENOMEM;
diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c
index 2cdbd5f107a2..11c5d80428cd 100644
--- a/drivers/hwmon/xgene-hwmon.c
+++ b/drivers/hwmon/xgene-hwmon.c
@@ -103,8 +103,6 @@ struct xgene_hwmon_dev {
struct device *hwmon_dev;
bool temp_critical_alarm;
- phys_addr_t comm_base_addr;
- void *pcc_comm_addr;
unsigned int usecs_lat;
};
@@ -125,7 +123,8 @@ static u16 xgene_word_tst_and_clr(u16 *addr, u16 mask)
static int xgene_hwmon_pcc_rd(struct xgene_hwmon_dev *ctx, u32 *msg)
{
- struct acpi_pcct_shared_memory *generic_comm_base = ctx->pcc_comm_addr;
+ struct acpi_pcct_shared_memory __iomem *generic_comm_base =
+ ctx->pcc_chan->shmem;
u32 *ptr = (void *)(generic_comm_base + 1);
int rc, i;
u16 val;
@@ -523,7 +522,8 @@ static void xgene_hwmon_rx_cb(struct mbox_client *cl, void *msg)
static void xgene_hwmon_pcc_rx_cb(struct mbox_client *cl, void *msg)
{
struct xgene_hwmon_dev *ctx = to_xgene_hwmon_dev(cl);
- struct acpi_pcct_shared_memory *generic_comm_base = ctx->pcc_comm_addr;
+ struct acpi_pcct_shared_memory __iomem *generic_comm_base =
+ ctx->pcc_chan->shmem;
struct slimpro_resp_msg amsg;
/*
@@ -649,7 +649,6 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
} else {
struct pcc_mbox_chan *pcc_chan;
const struct acpi_device_id *acpi_id;
- int version;
acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
&pdev->dev);
@@ -658,8 +657,6 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
goto out_mbox_free;
}
- version = (int)acpi_id->driver_data;
-
if (device_property_read_u32(&pdev->dev, "pcc-channel",
&ctx->mbox_idx)) {
dev_err(&pdev->dev, "no pcc-channel property\n");
@@ -686,34 +683,6 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
}
/*
- * This is the shared communication region
- * for the OS and Platform to communicate over.
- */
- ctx->comm_base_addr = pcc_chan->shmem_base_addr;
- if (ctx->comm_base_addr) {
- if (version == XGENE_HWMON_V2)
- ctx->pcc_comm_addr = (void __force *)devm_ioremap(&pdev->dev,
- ctx->comm_base_addr,
- pcc_chan->shmem_size);
- else
- ctx->pcc_comm_addr = devm_memremap(&pdev->dev,
- ctx->comm_base_addr,
- pcc_chan->shmem_size,
- MEMREMAP_WB);
- } else {
- dev_err(&pdev->dev, "Failed to get PCC comm region\n");
- rc = -ENODEV;
- goto out;
- }
-
- if (IS_ERR_OR_NULL(ctx->pcc_comm_addr)) {
- dev_err(&pdev->dev,
- "Failed to ioremap PCC comm region\n");
- rc = -ENOMEM;
- goto out;
- }
-
- /*
* pcc_chan->latency is just a Nominal value. In reality
* the remote processor could be much slower to reply.
* So add an arbitrary amount of wait on top of Nominal.
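
The deleted block duplicated work the PCC mailbox core now does itself: pcc_mbox_chan carries a pre-mapped shmem pointer, so clients no longer ioremap/memremap the shared region by hand. A hedged sketch of the simplified setup (error handling trimmed, field names as in the driver):

pcc_chan = pcc_mbox_request_channel(cl, ctx->mbox_idx);
if (IS_ERR(pcc_chan))
	return PTR_ERR(pcc_chan);

ctx->pcc_chan = pcc_chan;
/* Already mapped by the mailbox core; note the __iomem annotation. */
generic_comm_base = pcc_chan->shmem;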
diff --git a/drivers/i3c/master/Kconfig b/drivers/i3c/master/Kconfig
index 77da199c7413..7b30db3253af 100644
--- a/drivers/i3c/master/Kconfig
+++ b/drivers/i3c/master/Kconfig
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config CDNS_I3C_MASTER
tristate "Cadence I3C master driver"
- depends on I3C
depends on HAS_IOMEM
depends on !(ALPHA || PARISC)
help
@@ -9,7 +8,6 @@ config CDNS_I3C_MASTER
config DW_I3C_MASTER
tristate "Synospsys DesignWare I3C master driver"
- depends on I3C
depends on HAS_IOMEM
depends on !(ALPHA || PARISC)
# ALPHA and PARISC needs {read,write}sl()
@@ -38,7 +36,6 @@ config AST2600_I3C_MASTER
config SVC_I3C_MASTER
tristate "Silvaco I3C Dual-Role Master driver"
- depends on I3C
depends on HAS_IOMEM
depends on !(ALPHA || PARISC)
help
@@ -46,7 +43,6 @@ config SVC_I3C_MASTER
config MIPI_I3C_HCI
tristate "MIPI I3C Host Controller Interface driver (EXPERIMENTAL)"
- depends on I3C
depends on HAS_IOMEM
help
Support for hardware following the MIPI Alliance's I3C Host Controller
diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c
index a71226d7ca59..bc4538694540 100644
--- a/drivers/i3c/master/mipi-i3c-hci/core.c
+++ b/drivers/i3c/master/mipi-i3c-hci/core.c
@@ -78,7 +78,7 @@
#define INTR_SIGNAL_ENABLE 0x28
#define INTR_FORCE 0x2c
#define INTR_HC_CMD_SEQ_UFLOW_STAT BIT(12) /* Cmd Sequence Underflow */
-#define INTR_HC_RESET_CANCEL BIT(11) /* HC Cancelled Reset */
+#define INTR_HC_SEQ_CANCEL BIT(11) /* HC Cancelled Transaction Sequence */
#define INTR_HC_INTERNAL_ERR BIT(10) /* HC Internal Error */
#define DAT_SECTION 0x30 /* Device Address Table */
@@ -590,26 +590,27 @@ static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id)
u32 val;
val = reg_read(INTR_STATUS);
+ reg_write(INTR_STATUS, val);
DBG("INTR_STATUS = %#x", val);
- if (val) {
- reg_write(INTR_STATUS, val);
- }
+ if (val)
+ result = IRQ_HANDLED;
- if (val & INTR_HC_RESET_CANCEL) {
- DBG("cancelled reset");
- val &= ~INTR_HC_RESET_CANCEL;
+ if (val & INTR_HC_SEQ_CANCEL) {
+ dev_dbg(&hci->master.dev,
+ "Host Controller Cancelled Transaction Sequence\n");
+ val &= ~INTR_HC_SEQ_CANCEL;
}
if (val & INTR_HC_INTERNAL_ERR) {
dev_err(&hci->master.dev, "Host Controller Internal Error\n");
val &= ~INTR_HC_INTERNAL_ERR;
}
- hci->io->irq_handler(hci);
-
if (val)
- dev_err(&hci->master.dev, "unexpected INTR_STATUS %#x\n", val);
- else
+ dev_warn_once(&hci->master.dev,
+ "unexpected INTR_STATUS %#x\n", val);
+
+ if (hci->io->irq_handler(hci))
result = IRQ_HANDLED;
return result;
@@ -699,9 +700,14 @@ static int i3c_hci_init(struct i3c_hci *hci)
if (ret)
return -ENXIO;
- /* Disable all interrupts and allow all signal updates */
+ /* Disable all interrupts */
reg_write(INTR_SIGNAL_ENABLE, 0x0);
- reg_write(INTR_STATUS_ENABLE, 0xffffffff);
+ /*
+ * Only allow signal updates for bits 31:10, because
+ * bits 0:9 are reserved in IP version >= 0.8, and
+ * bits 0:5 are defined in IP version < 0.8 but not handled by the PIO code
+ */
+ reg_write(INTR_STATUS_ENABLE, GENMASK(31, 10));
/* Make sure our data ordering fits the host's */
regval = reg_read(HC_CONTROL);
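
The reordered handler acks INTR_STATUS by writing the value straight back before acting on it: the usual write-1-to-clear shape, which re-arms the latch so an event arriving during handling is not lost. Condensed, the pattern is:

/* Illustrative only: read, ack immediately, then dispatch. */
val = reg_read(INTR_STATUS);
reg_write(INTR_STATUS, val);	/* W1C: clears only the bits just read */

if (val & INTR_HC_INTERNAL_ERR)
	dev_err(&hci->master.dev, "Host Controller Internal Error\n");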
diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
index 85e16de208d3..7e1a7cb94b43 100644
--- a/drivers/i3c/master/svc-i3c-master.c
+++ b/drivers/i3c/master/svc-i3c-master.c
@@ -201,11 +201,10 @@ struct svc_i3c_drvdata {
* @addrs: Array containing the dynamic addresses of each attached device
* @descs: Array of descriptors, one per attached device
* @hj_work: Hot-join work
- * @ibi_work: IBI work
* @irq: Main interrupt
- * @pclk: System clock
+ * @num_clks: number of I3C clocks
* @fclk: Fast clock (bus)
- * @sclk: Slow clock (other events)
+ * @clks: I3C clock array
* @xferqueue: Transfer queue structure
* @xferqueue.list: List member
* @xferqueue.cur: Current ongoing transfer
@@ -229,11 +228,10 @@ struct svc_i3c_master {
u8 addrs[SVC_I3C_MAX_DEVS];
struct i3c_dev_desc *descs[SVC_I3C_MAX_DEVS];
struct work_struct hj_work;
- struct work_struct ibi_work;
int irq;
- struct clk *pclk;
+ int num_clks;
struct clk *fclk;
- struct clk *sclk;
+ struct clk_bulk_data *clks;
struct {
struct list_head list;
struct svc_i3c_xfer *cur;
@@ -487,9 +485,8 @@ static int svc_i3c_master_handle_ibi_won(struct svc_i3c_master *master, u32 msta
return ret;
}
-static void svc_i3c_master_ibi_work(struct work_struct *work)
+static void svc_i3c_master_ibi_isr(struct svc_i3c_master *master)
{
- struct svc_i3c_master *master = container_of(work, struct svc_i3c_master, ibi_work);
struct svc_i3c_i2c_dev_data *data;
unsigned int ibitype, ibiaddr;
struct i3c_dev_desc *dev;
@@ -504,7 +501,7 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
* schedule during the whole I3C transaction, otherwise, the I3C bus timeout may happen if
* any irq or schedule happen during transaction.
*/
- guard(spinlock_irqsave)(&master->xferqueue.lock);
+ guard(spinlock)(&master->xferqueue.lock);
/*
* IBIWON may be set before SVC_I3C_MCTRL_REQUEST_AUTO_IBI, causing
@@ -530,7 +527,7 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
if (ret) {
dev_err(master->dev, "Timeout when polling for IBIWON\n");
svc_i3c_master_emit_stop(master);
- goto reenable_ibis;
+ return;
}
status = readl(master->regs + SVC_I3C_MSTATUS);
@@ -574,17 +571,17 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
svc_i3c_master_emit_stop(master);
- goto reenable_ibis;
+ return;
}
/* Handle the non critical tasks */
switch (ibitype) {
case SVC_I3C_MSTATUS_IBITYPE_IBI:
+ svc_i3c_master_emit_stop(master);
if (dev) {
i3c_master_queue_ibi(dev, master->ibi.tbq_slot);
master->ibi.tbq_slot = NULL;
}
- svc_i3c_master_emit_stop(master);
break;
case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
svc_i3c_master_emit_stop(master);
@@ -597,9 +594,6 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
default:
break;
}
-
-reenable_ibis:
- svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
}
static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
@@ -618,10 +612,12 @@ static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
!SVC_I3C_MSTATUS_STATE_SLVREQ(active))
return IRQ_HANDLED;
- svc_i3c_master_disable_interrupts(master);
-
- /* Handle the interrupt in a non atomic context */
- queue_work(master->base.wq, &master->ibi_work);
+ /*
+ * The SDA line remains low until the request is processed.
+ * Receive the request in the interrupt context to respond promptly
+ * and restore the bus to idle state.
+ */
+ svc_i3c_master_ibi_isr(master);
return IRQ_HANDLED;
}
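
Because the IBI is now handled directly in hard-IRQ context, interrupts are already disabled and the xfer lock can be taken with a plain spinlock guard rather than the irqsave variant the workqueue path needed. A small sketch of the cleanup.h guard pattern (function name hypothetical):

#include <linux/cleanup.h>
#include <linux/spinlock.h>

static void handle_slave_request(struct svc_i3c_master *master)
{
	/*
	 * Hard-IRQ context: a plain guard suffices, and the lock is
	 * dropped automatically when the scope ends.
	 */
	guard(spinlock)(&master->xferqueue.lock);

	/* ... poll MSTATUS, fetch the IBI payload, emit STOP ... */
}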
@@ -1281,9 +1277,9 @@ static int svc_i3c_master_write(struct svc_i3c_master *master,
static int svc_i3c_master_xfer(struct svc_i3c_master *master,
bool rnw, unsigned int xfer_type, u8 addr,
u8 *in, const u8 *out, unsigned int xfer_len,
- unsigned int *actual_len, bool continued)
+ unsigned int *actual_len, bool continued, bool repeat_start)
{
- int retry = 2;
+ int retry = repeat_start ? 1 : 2;
u32 reg;
int ret;
@@ -1468,7 +1464,7 @@ static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type,
cmd->addr, cmd->in, cmd->out,
cmd->len, &cmd->actual_len,
- cmd->continued);
+ cmd->continued, i > 0);
/* cmd->xfer is NULL if I2C or CCC transfer */
if (cmd->xfer)
cmd->xfer->actual_len = cmd->actual_len;
@@ -1875,42 +1871,11 @@ static const struct i3c_master_controller_ops svc_i3c_master_ops = {
.set_speed = svc_i3c_master_set_speed,
};
-static int svc_i3c_master_prepare_clks(struct svc_i3c_master *master)
-{
- int ret = 0;
-
- ret = clk_prepare_enable(master->pclk);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(master->fclk);
- if (ret) {
- clk_disable_unprepare(master->pclk);
- return ret;
- }
-
- ret = clk_prepare_enable(master->sclk);
- if (ret) {
- clk_disable_unprepare(master->pclk);
- clk_disable_unprepare(master->fclk);
- return ret;
- }
-
- return 0;
-}
-
-static void svc_i3c_master_unprepare_clks(struct svc_i3c_master *master)
-{
- clk_disable_unprepare(master->pclk);
- clk_disable_unprepare(master->fclk);
- clk_disable_unprepare(master->sclk);
-}
-
static int svc_i3c_master_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct svc_i3c_master *master;
- int ret;
+ int ret, i;
master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
if (!master)
@@ -1924,30 +1889,33 @@ static int svc_i3c_master_probe(struct platform_device *pdev)
if (IS_ERR(master->regs))
return PTR_ERR(master->regs);
- master->pclk = devm_clk_get(dev, "pclk");
- if (IS_ERR(master->pclk))
- return PTR_ERR(master->pclk);
+ master->num_clks = devm_clk_bulk_get_all(dev, &master->clks);
+ if (master->num_clks < 0)
+ return dev_err_probe(dev, master->num_clks, "can't get I3C clocks\n");
- master->fclk = devm_clk_get(dev, "fast_clk");
+ for (i = 0; i < master->num_clks; i++) {
+ if (!strcmp(master->clks[i].id, "fast_clk"))
+ break;
+ }
+
+ if (i == master->num_clks)
+ return dev_err_probe(dev, -EINVAL,
+ "can't get I3C peripheral clock\n");
+
+ master->fclk = master->clks[i].clk;
if (IS_ERR(master->fclk))
return PTR_ERR(master->fclk);
- master->sclk = devm_clk_get(dev, "slow_clk");
- if (IS_ERR(master->sclk))
- return PTR_ERR(master->sclk);
-
master->irq = platform_get_irq(pdev, 0);
if (master->irq < 0)
return master->irq;
master->dev = dev;
-
- ret = svc_i3c_master_prepare_clks(master);
+ ret = clk_bulk_prepare_enable(master->num_clks, master->clks);
if (ret)
- return ret;
+ return dev_err_probe(dev, ret, "can't enable I3C clocks\n");
INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
- INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work);
mutex_init(&master->lock);
ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
@@ -1998,7 +1966,7 @@ rpm_disable:
pm_runtime_set_suspended(&pdev->dev);
err_disable_clks:
- svc_i3c_master_unprepare_clks(master);
+ clk_bulk_disable_unprepare(master->num_clks, master->clks);
return ret;
}
@@ -2036,7 +2004,7 @@ static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev)
struct svc_i3c_master *master = dev_get_drvdata(dev);
svc_i3c_save_regs(master);
- svc_i3c_master_unprepare_clks(master);
+ clk_bulk_disable_unprepare(master->num_clks, master->clks);
pinctrl_pm_select_sleep_state(dev);
return 0;
@@ -2045,9 +2013,12 @@ static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev)
static int __maybe_unused svc_i3c_runtime_resume(struct device *dev)
{
struct svc_i3c_master *master = dev_get_drvdata(dev);
+ int ret;
pinctrl_pm_select_default_state(dev);
- svc_i3c_master_prepare_clks(master);
+ ret = clk_bulk_prepare_enable(master->num_clks, master->clks);
+ if (ret)
+ return ret;
svc_i3c_restore_regs(master);
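
For reference, the clk_bulk conversion above collapses the per-clock get/enable/rollback chains into single calls: devm_clk_bulk_get_all() returns the clock count (or a negative errno), and clk_bulk_prepare_enable() unwinds internally on partial failure. A minimal probe-time sketch under hypothetical foo_* names:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/platform_device.h>

struct foo_master {
	int num_clks;
	struct clk_bulk_data *clks;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_master *m;
	int ret;

	m = devm_kzalloc(&pdev->dev, sizeof(*m), GFP_KERNEL);
	if (!m)
		return -ENOMEM;

	/* Fetch every clock listed in the DT node in one call */
	m->num_clks = devm_clk_bulk_get_all(&pdev->dev, &m->clks);
	if (m->num_clks < 0)
		return m->num_clks;

	/* Enable them all; partial failures are rolled back internally */
	ret = clk_bulk_prepare_enable(m->num_clks, m->clks);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, m);
	return 0;
}

static void foo_remove(struct platform_device *pdev)
{
	struct foo_master *m = platform_get_drvdata(pdev);

	clk_bulk_disable_unprepare(m->num_clks, m->clks);
}
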
diff --git a/drivers/iio/adc/qcom-spmi-rradc.c b/drivers/iio/adc/qcom-spmi-rradc.c
index 63ebaf13ef19..f61ad0510f04 100644
--- a/drivers/iio/adc/qcom-spmi-rradc.c
+++ b/drivers/iio/adc/qcom-spmi-rradc.c
@@ -2,7 +2,7 @@
/*
* Copyright (c) 2016-2017, 2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Linaro Limited.
- * Author: Caleb Connolly <caleb.connolly@linaro.org>
+ * Author: Casey Connolly <casey.connolly@linaro.org>
*
* This driver is for the Round Robin ADC found in the pmi8998 and pm660 PMICs.
*/
@@ -1016,5 +1016,5 @@ static struct platform_driver rradc_driver = {
module_platform_driver(rradc_driver);
MODULE_DESCRIPTION("QCOM SPMI PMIC RR ADC driver");
-MODULE_AUTHOR("Caleb Connolly <caleb.connolly@linaro.org>");
+MODULE_AUTHOR("Casey Connolly <casey.connolly@linaro.org>");
MODULE_LICENSE("GPL");
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 142170473e75..8670e58675c6 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -36,6 +36,7 @@ MODULE_LICENSE("Dual BSD/GPL");
#define CM_DESTROY_ID_WAIT_TIMEOUT 10000 /* msecs */
#define CM_DIRECT_RETRY_CTX ((void *) 1UL)
+#define CM_MRA_SETTING 24 /* 4.096us * 2^24 = ~68.7 seconds */
static const char * const ibcm_rej_reason_strs[] = {
[IB_CM_REJ_NO_QP] = "no QP",
@@ -167,7 +168,7 @@ struct cm_port {
struct cm_device {
struct kref kref;
struct list_head list;
- spinlock_t mad_agent_lock;
+ rwlock_t mad_agent_lock;
struct ib_device *ib_device;
u8 ack_delay;
int going_down;
@@ -241,7 +242,6 @@ struct cm_id_private {
u8 initiator_depth;
u8 retry_count;
u8 rnr_retry_count;
- u8 service_timeout;
u8 target_ack_delay;
struct list_head work_list;
@@ -285,7 +285,7 @@ static struct ib_mad_send_buf *cm_alloc_msg(struct cm_id_private *cm_id_priv)
if (!cm_id_priv->av.port)
return ERR_PTR(-EINVAL);
- spin_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
+ read_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
mad_agent = cm_id_priv->av.port->mad_agent;
if (!mad_agent) {
m = ERR_PTR(-EINVAL);
@@ -311,7 +311,7 @@ static struct ib_mad_send_buf *cm_alloc_msg(struct cm_id_private *cm_id_priv)
m->ah = ah;
out:
- spin_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
+ read_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
return m;
}
@@ -1297,10 +1297,10 @@ static __be64 cm_form_tid(struct cm_id_private *cm_id_priv)
if (!cm_id_priv->av.port)
return cpu_to_be64(low_tid);
- spin_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
+ read_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
if (cm_id_priv->av.port->mad_agent)
hi_tid = ((u64)cm_id_priv->av.port->mad_agent->hi_tid) << 32;
- spin_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
+ read_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
return cpu_to_be64(hi_tid | low_tid);
}
@@ -1872,7 +1872,7 @@ static void cm_process_work(struct cm_id_private *cm_id_priv,
static void cm_format_mra(struct cm_mra_msg *mra_msg,
struct cm_id_private *cm_id_priv,
- enum cm_msg_response msg_mraed, u8 service_timeout,
+ enum cm_msg_response msg_mraed,
const void *private_data, u8 private_data_len)
{
cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
@@ -1881,7 +1881,7 @@ static void cm_format_mra(struct cm_mra_msg *mra_msg,
be32_to_cpu(cm_id_priv->id.local_id));
IBA_SET(CM_MRA_REMOTE_COMM_ID, mra_msg,
be32_to_cpu(cm_id_priv->id.remote_id));
- IBA_SET(CM_MRA_SERVICE_TIMEOUT, mra_msg, service_timeout);
+ IBA_SET(CM_MRA_SERVICE_TIMEOUT, mra_msg, CM_MRA_SETTING);
if (private_data && private_data_len)
IBA_SET_MEM(CM_MRA_PRIVATE_DATA, mra_msg, private_data,
@@ -1960,7 +1960,7 @@ static void cm_dup_req_handler(struct cm_work *work,
switch (cm_id_priv->id.state) {
case IB_CM_MRA_REQ_SENT:
cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
- CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
+ CM_MSG_RESPONSE_REQ,
cm_id_priv->private_data,
cm_id_priv->private_data_len);
break;
@@ -2454,7 +2454,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
cm_id_priv->private_data_len);
else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
- CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
+ CM_MSG_RESPONSE_REP,
cm_id_priv->private_data,
cm_id_priv->private_data_len);
else
@@ -3094,26 +3094,13 @@ out:
return -EINVAL;
}
-int ib_send_cm_mra(struct ib_cm_id *cm_id,
- u8 service_timeout,
- const void *private_data,
- u8 private_data_len)
+int ib_prepare_cm_mra(struct ib_cm_id *cm_id)
{
struct cm_id_private *cm_id_priv;
- struct ib_mad_send_buf *msg;
enum ib_cm_state cm_state;
enum ib_cm_lap_state lap_state;
- enum cm_msg_response msg_response;
- void *data;
unsigned long flags;
- int ret;
-
- if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
- return -EINVAL;
-
- data = cm_copy_private_data(private_data, private_data_len);
- if (IS_ERR(data))
- return PTR_ERR(data);
+ int ret = 0;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
@@ -3122,58 +3109,33 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
case IB_CM_REQ_RCVD:
cm_state = IB_CM_MRA_REQ_SENT;
lap_state = cm_id->lap_state;
- msg_response = CM_MSG_RESPONSE_REQ;
break;
case IB_CM_REP_RCVD:
cm_state = IB_CM_MRA_REP_SENT;
lap_state = cm_id->lap_state;
- msg_response = CM_MSG_RESPONSE_REP;
break;
case IB_CM_ESTABLISHED:
if (cm_id->lap_state == IB_CM_LAP_RCVD) {
cm_state = cm_id->state;
lap_state = IB_CM_MRA_LAP_SENT;
- msg_response = CM_MSG_RESPONSE_OTHER;
break;
}
fallthrough;
default:
- trace_icm_send_mra_unknown_err(&cm_id_priv->id);
+ trace_icm_prepare_mra_unknown_err(&cm_id_priv->id);
ret = -EINVAL;
goto error_unlock;
}
- if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
- msg = cm_alloc_msg(cm_id_priv);
- if (IS_ERR(msg)) {
- ret = PTR_ERR(msg);
- goto error_unlock;
- }
-
- cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
- msg_response, service_timeout,
- private_data, private_data_len);
- trace_icm_send_mra(cm_id);
- ret = ib_post_send_mad(msg, NULL);
- if (ret)
- goto error_free_msg;
- }
-
cm_id->state = cm_state;
cm_id->lap_state = lap_state;
- cm_id_priv->service_timeout = service_timeout;
- cm_set_private_data(cm_id_priv, data, private_data_len);
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- return 0;
+ cm_set_private_data(cm_id_priv, NULL, 0);
-error_free_msg:
- cm_free_msg(msg);
error_unlock:
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- kfree(data);
return ret;
}
-EXPORT_SYMBOL(ib_send_cm_mra);
+EXPORT_SYMBOL(ib_prepare_cm_mra);
static struct cm_id_private *cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
@@ -3377,7 +3339,6 @@ static int cm_lap_handler(struct cm_work *work)
cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
CM_MSG_RESPONSE_OTHER,
- cm_id_priv->service_timeout,
cm_id_priv->private_data,
cm_id_priv->private_data_len);
spin_unlock_irq(&cm_id_priv->lock);
@@ -3786,7 +3747,8 @@ static void cm_process_send_error(struct cm_id_private *cm_id_priv,
spin_lock_irq(&cm_id_priv->lock);
if (msg != cm_id_priv->msg) {
spin_unlock_irq(&cm_id_priv->lock);
- cm_free_priv_msg(msg);
+ cm_free_msg(msg);
+ cm_deref_id(cm_id_priv);
return;
}
cm_free_priv_msg(msg);
@@ -4378,7 +4340,7 @@ static int cm_add_one(struct ib_device *ib_device)
return -ENOMEM;
kref_init(&cm_dev->kref);
- spin_lock_init(&cm_dev->mad_agent_lock);
+ rwlock_init(&cm_dev->mad_agent_lock);
cm_dev->ib_device = ib_device;
cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
cm_dev->going_down = 0;
@@ -4494,9 +4456,9 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
* The above ensures no call paths from the work are running,
* the remaining paths all take the mad_agent_lock.
*/
- spin_lock(&cm_dev->mad_agent_lock);
+ write_lock(&cm_dev->mad_agent_lock);
port->mad_agent = NULL;
- spin_unlock(&cm_dev->mad_agent_lock);
+ write_unlock(&cm_dev->mad_agent_lock);
ib_unregister_mad_agent(mad_agent);
ib_port_unregister_client_groups(ib_device, i,
cm_counter_groups);
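
The mad_agent_lock conversion is the classic reader/writer split: cm_alloc_msg() and cm_form_tid() only read the agent pointer on hot paths, while cm_remove_one() is the lone writer at teardown, so concurrent senders no longer serialize against each other. A minimal sketch of the pattern, assuming hypothetical foo_* names:

#include <linux/spinlock.h>

struct foo_dev {
	rwlock_t agent_lock;	/* was spinlock_t: readers now run in parallel */
	void *agent;
};

static void foo_dev_init(struct foo_dev *d)
{
	rwlock_init(&d->agent_lock);
	d->agent = NULL;
}

/* Hot path: many concurrent users may hold the read side at once */
static int foo_use_agent(struct foo_dev *d)
{
	int ret = -EINVAL;

	read_lock(&d->agent_lock);
	if (d->agent)
		ret = 0;	/* ... use d->agent while the lock is held ... */
	read_unlock(&d->agent_lock);
	return ret;
}

/* Cold path: device removal is the only writer */
static void foo_clear_agent(struct foo_dev *d)
{
	write_lock(&d->agent_lock);
	d->agent = NULL;
	write_unlock(&d->agent_lock);
}
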
diff --git a/drivers/infiniband/core/cm_trace.h b/drivers/infiniband/core/cm_trace.h
index 944d9071245d..4a4987da69d4 100644
--- a/drivers/infiniband/core/cm_trace.h
+++ b/drivers/infiniband/core/cm_trace.h
@@ -229,7 +229,7 @@ DEFINE_CM_ERR_EVENT(send_drep);
DEFINE_CM_ERR_EVENT(dreq_unknown);
DEFINE_CM_ERR_EVENT(send_unknown_rej);
DEFINE_CM_ERR_EVENT(rej_unknown);
-DEFINE_CM_ERR_EVENT(send_mra_unknown);
+DEFINE_CM_ERR_EVENT(prepare_mra_unknown);
DEFINE_CM_ERR_EVENT(mra_unknown);
DEFINE_CM_ERR_EVENT(qp_init);
DEFINE_CM_ERR_EVENT(qp_rtr);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index ab31eefa916b..9b471548e7ae 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -46,7 +46,6 @@ MODULE_LICENSE("Dual BSD/GPL");
#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15
-#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 16
#define CMA_PREFERRED_ROCE_GID_TYPE IB_GID_TYPE_ROCE_UDP_ENCAP
@@ -146,19 +145,6 @@ struct iw_cm_id *rdma_iw_cm_id(struct rdma_cm_id *id)
}
EXPORT_SYMBOL(rdma_iw_cm_id);
-/**
- * rdma_res_to_id() - return the rdma_cm_id pointer for this restrack.
- * @res: rdma resource tracking entry pointer
- */
-struct rdma_cm_id *rdma_res_to_id(struct rdma_restrack_entry *res)
-{
- struct rdma_id_private *id_priv =
- container_of(res, struct rdma_id_private, res);
-
- return &id_priv->id;
-}
-EXPORT_SYMBOL(rdma_res_to_id);
-
static int cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device, void *client_data);
@@ -2214,8 +2200,8 @@ static int cma_ib_handler(struct ib_cm_id *cm_id,
case IB_CM_REP_RECEIVED:
if (state == RDMA_CM_CONNECT &&
(id_priv->id.qp_type != IB_QPT_UD)) {
- trace_cm_send_mra(id_priv);
- ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
+ trace_cm_prepare_mra(id_priv);
+ ib_prepare_cm_mra(cm_id);
}
if (id_priv->id.qp) {
event.status = cma_rep_recv(id_priv);
@@ -2476,8 +2462,8 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id,
if (READ_ONCE(conn_id->state) == RDMA_CM_CONNECT &&
conn_id->id.qp_type != IB_QPT_UD) {
- trace_cm_send_mra(cm_id->context);
- ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
+ trace_cm_prepare_mra(cm_id->context);
+ ib_prepare_cm_mra(cm_id);
}
mutex_unlock(&conn_id->handler_mutex);
@@ -5245,7 +5231,8 @@ static int cma_netevent_callback(struct notifier_block *self,
neigh->ha, ETH_ALEN))
continue;
cma_id_get(current_id);
- queue_work(cma_wq, &current_id->id.net_work);
+ if (!queue_work(cma_wq, &current_id->id.net_work))
+ cma_id_put(current_id);
}
out:
spin_unlock_irqrestore(&id_table_lock, flags);
diff --git a/drivers/infiniband/core/cma_trace.h b/drivers/infiniband/core/cma_trace.h
index dc622f3778be..3456d5f3aa47 100644
--- a/drivers/infiniband/core/cma_trace.h
+++ b/drivers/infiniband/core/cma_trace.h
@@ -55,7 +55,7 @@ DECLARE_EVENT_CLASS(cma_fsm_class,
DEFINE_CMA_FSM_EVENT(send_rtu);
DEFINE_CMA_FSM_EVENT(send_rej);
-DEFINE_CMA_FSM_EVENT(send_mra);
+DEFINE_CMA_FSM_EVENT(prepare_mra);
DEFINE_CMA_FSM_EVENT(send_sidr_req);
DEFINE_CMA_FSM_EVENT(send_sidr_rep);
DEFINE_CMA_FSM_EVENT(disconnect);
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index f4486cbd8f45..62410578dec3 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -368,12 +368,9 @@ EXPORT_SYMBOL(iw_cm_disconnect);
/*
* CM_ID <-- DESTROYING
*
- * Clean up all resources associated with the connection and release
- * the initial reference taken by iw_create_cm_id.
- *
- * Returns true if and only if the last cm_id_priv reference has been dropped.
+ * Clean up all resources associated with the connection.
*/
-static bool destroy_cm_id(struct iw_cm_id *cm_id)
+static void destroy_cm_id(struct iw_cm_id *cm_id)
{
struct iwcm_id_private *cm_id_priv;
struct ib_qp *qp;
@@ -442,20 +439,22 @@ static bool destroy_cm_id(struct iw_cm_id *cm_id)
iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr);
iwpm_remove_mapping(&cm_id->local_addr, RDMA_NL_IWCM);
}
-
- return iwcm_deref_id(cm_id_priv);
}
/*
- * This function is only called by the application thread and cannot
- * be called by the event thread. The function will wait for all
- * references to be released on the cm_id and then kfree the cm_id
- * object.
+ * Destroy cm_id. If the cm_id still has other references, wait for all
+ * references to be released on the cm_id and then release the initial
+ * reference taken by iw_create_cm_id.
*/
void iw_destroy_cm_id(struct iw_cm_id *cm_id)
{
- if (!destroy_cm_id(cm_id))
+ struct iwcm_id_private *cm_id_priv;
+
+ cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+ destroy_cm_id(cm_id);
+ if (refcount_read(&cm_id_priv->refcount) > 1)
flush_workqueue(iwcm_wq);
+ iwcm_deref_id(cm_id_priv);
}
EXPORT_SYMBOL(iw_destroy_cm_id);
@@ -1035,8 +1034,10 @@ static void cm_work_handler(struct work_struct *_work)
if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) {
ret = process_event(cm_id_priv, &levent);
- if (ret)
- WARN_ON_ONCE(destroy_cm_id(&cm_id_priv->id));
+ if (ret) {
+ destroy_cm_id(&cm_id_priv->id);
+ WARN_ON_ONCE(iwcm_deref_id(cm_id_priv));
+ }
} else
pr_debug("dropping event %d\n", levent.event);
if (iwcm_deref_id(cm_id_priv))
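
The reworked teardown separates resource cleanup from reference dropping: destroy_cm_id() tears down connection state, the caller flushes the event workqueue only when handlers still hold references, and the creation reference is dropped last. A rough sketch of that ordering, under hypothetical foo_* names:

#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct foo_id {
	refcount_t ref;			/* creation ref + one per queued event */
	struct work_struct work;
};

static void foo_deref(struct foo_id *id)
{
	if (refcount_dec_and_test(&id->ref))
		kfree(id);
}

static void foo_destroy(struct foo_id *id, struct workqueue_struct *wq)
{
	/* ... tear down connection state, QP references, mappings ... */

	/*
	 * Only pending event work holds extra references; flushing the
	 * queue guarantees those handlers have run and dropped theirs
	 * before the creation reference goes away.
	 */
	if (refcount_read(&id->ref) > 1)
		flush_workqueue(wq);
	foo_deref(id);
}
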
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 8af0619a39cd..b4b10e8a6495 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -158,7 +158,7 @@ static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
recv_wc->recv_buf.grh, agent->port_num);
if (IS_ERR(ah))
- return (void *) ah;
+ return ERR_CAST(ah);
hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
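
ERR_CAST() replaces the bare (void *) casts here and in the uverbs_cmd.c and verbs.c hunks below; it makes explicit that an ERR_PTR-encoded errno is being propagated across pointer types rather than a real object. A minimal illustration with hypothetical types:

#include <linux/err.h>

struct foo;
struct bar;

struct foo *foo_create(void);		/* may return ERR_PTR(-ENOMEM) etc. */
struct bar *bar_wrap(struct foo *f);

struct bar *bar_create(void)
{
	struct foo *f = foo_create();

	if (IS_ERR(f))
		return ERR_CAST(f);	/* keeps the errno, fixes the type */
	return bar_wrap(f);
}
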
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index c48ef6083020..c752ae9fad6c 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -41,67 +41,72 @@
#include <linux/hugetlb.h>
#include <linux/interval_tree.h>
#include <linux/hmm.h>
+#include <linux/hmm-dma.h>
#include <linux/pagemap.h>
#include <rdma/ib_umem_odp.h>
#include "uverbs.h"
-static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
- const struct mmu_interval_notifier_ops *ops)
+static void ib_init_umem_implicit_odp(struct ib_umem_odp *umem_odp)
{
- int ret;
+ umem_odp->is_implicit_odp = 1;
+ umem_odp->umem.is_odp = 1;
+ mutex_init(&umem_odp->umem_mutex);
+}
+
+static int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
+ const struct mmu_interval_notifier_ops *ops)
+{
+ struct ib_device *dev = umem_odp->umem.ibdev;
+ size_t page_size = 1UL << umem_odp->page_shift;
+ struct hmm_dma_map *map;
+ unsigned long start;
+ unsigned long end;
+ size_t nr_entries;
+ int ret = 0;
umem_odp->umem.is_odp = 1;
mutex_init(&umem_odp->umem_mutex);
- if (!umem_odp->is_implicit_odp) {
- size_t page_size = 1UL << umem_odp->page_shift;
- unsigned long start;
- unsigned long end;
- size_t ndmas, npfns;
-
- start = ALIGN_DOWN(umem_odp->umem.address, page_size);
- if (check_add_overflow(umem_odp->umem.address,
- (unsigned long)umem_odp->umem.length,
- &end))
- return -EOVERFLOW;
- end = ALIGN(end, page_size);
- if (unlikely(end < page_size))
- return -EOVERFLOW;
-
- ndmas = (end - start) >> umem_odp->page_shift;
- if (!ndmas)
- return -EINVAL;
-
- npfns = (end - start) >> PAGE_SHIFT;
- umem_odp->pfn_list = kvcalloc(
- npfns, sizeof(*umem_odp->pfn_list),
- GFP_KERNEL | __GFP_NOWARN);
- if (!umem_odp->pfn_list)
- return -ENOMEM;
-
- umem_odp->dma_list = kvcalloc(
- ndmas, sizeof(*umem_odp->dma_list),
- GFP_KERNEL | __GFP_NOWARN);
- if (!umem_odp->dma_list) {
+ start = ALIGN_DOWN(umem_odp->umem.address, page_size);
+ if (check_add_overflow(umem_odp->umem.address,
+ (unsigned long)umem_odp->umem.length, &end))
+ return -EOVERFLOW;
+ end = ALIGN(end, page_size);
+ if (unlikely(end < page_size))
+ return -EOVERFLOW;
+
+ nr_entries = (end - start) >> PAGE_SHIFT;
+ if (!(nr_entries * PAGE_SIZE / page_size))
+ return -EINVAL;
+
+ map = &umem_odp->map;
+ if (ib_uses_virt_dma(dev)) {
+ map->pfn_list = kvcalloc(nr_entries, sizeof(*map->pfn_list),
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!map->pfn_list)
ret = -ENOMEM;
- goto out_pfn_list;
- }
+ } else
+ ret = hmm_dma_map_alloc(dev->dma_device, map,
+ (end - start) >> PAGE_SHIFT,
+ 1 << umem_odp->page_shift);
+ if (ret)
+ return ret;
- ret = mmu_interval_notifier_insert(&umem_odp->notifier,
- umem_odp->umem.owning_mm,
- start, end - start, ops);
- if (ret)
- goto out_dma_list;
- }
+ ret = mmu_interval_notifier_insert(&umem_odp->notifier,
+ umem_odp->umem.owning_mm, start,
+ end - start, ops);
+ if (ret)
+ goto out_free_map;
return 0;
-out_dma_list:
- kvfree(umem_odp->dma_list);
-out_pfn_list:
- kvfree(umem_odp->pfn_list);
+out_free_map:
+ if (ib_uses_virt_dma(dev))
+ kvfree(map->pfn_list);
+ else
+ hmm_dma_map_free(dev->dma_device, map);
return ret;
}
@@ -120,7 +125,6 @@ struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
{
struct ib_umem *umem;
struct ib_umem_odp *umem_odp;
- int ret;
if (access & IB_ACCESS_HUGETLB)
return ERR_PTR(-EINVAL);
@@ -132,16 +136,10 @@ struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
umem->ibdev = device;
umem->writable = ib_access_writable(access);
umem->owning_mm = current->mm;
- umem_odp->is_implicit_odp = 1;
umem_odp->page_shift = PAGE_SHIFT;
umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
- ret = ib_init_umem_odp(umem_odp, NULL);
- if (ret) {
- put_pid(umem_odp->tgid);
- kfree(umem_odp);
- return ERR_PTR(ret);
- }
+ ib_init_umem_implicit_odp(umem_odp);
return umem_odp;
}
EXPORT_SYMBOL(ib_umem_odp_alloc_implicit);
@@ -262,74 +260,41 @@ err_put_pid:
}
EXPORT_SYMBOL(ib_umem_odp_get);
-void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
+static void ib_umem_odp_free(struct ib_umem_odp *umem_odp)
{
+ struct ib_device *dev = umem_odp->umem.ibdev;
+
/*
* Ensure that no more pages are mapped in the umem.
*
* It is the driver's responsibility to ensure, before calling us,
* that the hardware will not attempt to access the MR any more.
*/
- if (!umem_odp->is_implicit_odp) {
- mutex_lock(&umem_odp->umem_mutex);
- ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
- ib_umem_end(umem_odp));
- mutex_unlock(&umem_odp->umem_mutex);
- mmu_interval_notifier_remove(&umem_odp->notifier);
- kvfree(umem_odp->dma_list);
- kvfree(umem_odp->pfn_list);
- }
- put_pid(umem_odp->tgid);
- kfree(umem_odp);
+ mutex_lock(&umem_odp->umem_mutex);
+ ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
+ ib_umem_end(umem_odp));
+ mutex_unlock(&umem_odp->umem_mutex);
+ mmu_interval_notifier_remove(&umem_odp->notifier);
+ if (ib_uses_virt_dma(dev))
+ kvfree(umem_odp->map.pfn_list);
+ else
+ hmm_dma_map_free(dev->dma_device, &umem_odp->map);
}
-EXPORT_SYMBOL(ib_umem_odp_release);
-/*
- * Map for DMA and insert a single page into the on-demand paging page tables.
- *
- * @umem: the umem to insert the page to.
- * @dma_index: index in the umem to add the dma to.
- * @page: the page struct to map and add.
- * @access_mask: access permissions needed for this page.
- *
- * The function returns -EFAULT if the DMA mapping operation fails.
- *
- */
-static int ib_umem_odp_map_dma_single_page(
- struct ib_umem_odp *umem_odp,
- unsigned int dma_index,
- struct page *page,
- u64 access_mask)
+void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
{
- struct ib_device *dev = umem_odp->umem.ibdev;
- dma_addr_t *dma_addr = &umem_odp->dma_list[dma_index];
-
- if (*dma_addr) {
- /*
- * If the page is already dma mapped it means it went through
- * a non-invalidating trasition, like read-only to writable.
- * Resync the flags.
- */
- *dma_addr = (*dma_addr & ODP_DMA_ADDR_MASK) | access_mask;
- return 0;
- }
+ if (!umem_odp->is_implicit_odp)
+ ib_umem_odp_free(umem_odp);
- *dma_addr = ib_dma_map_page(dev, page, 0, 1 << umem_odp->page_shift,
- DMA_BIDIRECTIONAL);
- if (ib_dma_mapping_error(dev, *dma_addr)) {
- *dma_addr = 0;
- return -EFAULT;
- }
- umem_odp->npages++;
- *dma_addr |= access_mask;
- return 0;
+ put_pid(umem_odp->tgid);
+ kfree(umem_odp);
}
+EXPORT_SYMBOL(ib_umem_odp_release);
/**
* ib_umem_odp_map_dma_and_lock - DMA map userspace memory in an ODP MR and lock it.
*
* Maps the range passed in the argument to DMA addresses.
- * The DMA addresses of the mapped pages is updated in umem_odp->dma_list.
* Upon success the ODP MR will be locked to let caller complete its device
* page table update.
*
@@ -357,9 +322,6 @@ int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt,
struct hmm_range range = {};
unsigned long timeout;
- if (access_mask == 0)
- return -EINVAL;
-
if (user_virt < ib_umem_start(umem_odp) ||
user_virt + bcnt > ib_umem_end(umem_odp))
return -EFAULT;
@@ -385,11 +347,11 @@ int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt,
if (fault) {
range.default_flags = HMM_PFN_REQ_FAULT;
- if (access_mask & ODP_WRITE_ALLOWED_BIT)
+ if (access_mask & HMM_PFN_WRITE)
range.default_flags |= HMM_PFN_REQ_WRITE;
}
- range.hmm_pfns = &(umem_odp->pfn_list[pfn_start_idx]);
+ range.hmm_pfns = &(umem_odp->map.pfn_list[pfn_start_idx]);
timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
retry:
@@ -417,22 +379,17 @@ retry:
for (pfn_index = 0; pfn_index < num_pfns;
pfn_index += 1 << (page_shift - PAGE_SHIFT), dma_index++) {
- if (fault) {
- /*
- * Since we asked for hmm_range_fault() to populate
- * pages it shouldn't return an error entry on success.
- */
- WARN_ON(range.hmm_pfns[pfn_index] & HMM_PFN_ERROR);
- WARN_ON(!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID));
- } else {
- if (!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID)) {
- WARN_ON(umem_odp->dma_list[dma_index]);
- continue;
- }
- access_mask = ODP_READ_ALLOWED_BIT;
- if (range.hmm_pfns[pfn_index] & HMM_PFN_WRITE)
- access_mask |= ODP_WRITE_ALLOWED_BIT;
- }
+ /*
+ * Since we asked for hmm_range_fault() to populate
+ * pages it shouldn't return an error entry on success.
+ */
+ WARN_ON(fault && range.hmm_pfns[pfn_index] & HMM_PFN_ERROR);
+ WARN_ON(fault && !(range.hmm_pfns[pfn_index] & HMM_PFN_VALID));
+ if (!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID))
+ continue;
+
+ if (range.hmm_pfns[pfn_index] & HMM_PFN_DMA_MAPPED)
+ continue;
hmm_order = hmm_pfn_to_map_order(range.hmm_pfns[pfn_index]);
/* If a hugepage was detected and ODP wasn't set for, the umem
@@ -445,15 +402,6 @@ retry:
__func__, hmm_order, page_shift);
break;
}
-
- ret = ib_umem_odp_map_dma_single_page(
- umem_odp, dma_index, hmm_pfn_to_page(range.hmm_pfns[pfn_index]),
- access_mask);
- if (ret < 0) {
- ibdev_dbg(umem_odp->umem.ibdev,
- "ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
- break;
- }
}
/* upon success lock should stay on hold for the callee */
if (!ret)
@@ -473,45 +421,38 @@ EXPORT_SYMBOL(ib_umem_odp_map_dma_and_lock);
void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
u64 bound)
{
- dma_addr_t dma_addr;
- dma_addr_t dma;
- int idx;
- u64 addr;
struct ib_device *dev = umem_odp->umem.ibdev;
+ u64 addr;
lockdep_assert_held(&umem_odp->umem_mutex);
virt = max_t(u64, virt, ib_umem_start(umem_odp));
bound = min_t(u64, bound, ib_umem_end(umem_odp));
for (addr = virt; addr < bound; addr += BIT(umem_odp->page_shift)) {
- idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
- dma = umem_odp->dma_list[idx];
-
- /* The access flags guaranteed a valid DMA address in case was NULL */
- if (dma) {
- unsigned long pfn_idx = (addr - ib_umem_start(umem_odp)) >> PAGE_SHIFT;
- struct page *page = hmm_pfn_to_page(umem_odp->pfn_list[pfn_idx]);
-
- dma_addr = dma & ODP_DMA_ADDR_MASK;
- ib_dma_unmap_page(dev, dma_addr,
- BIT(umem_odp->page_shift),
- DMA_BIDIRECTIONAL);
- if (dma & ODP_WRITE_ALLOWED_BIT) {
- struct page *head_page = compound_head(page);
- /*
- * set_page_dirty prefers being called with
- * the page lock. However, MMU notifiers are
- * called sometimes with and sometimes without
- * the lock. We rely on the umem_mutex instead
- * to prevent other mmu notifiers from
- * continuing and allowing the page mapping to
- * be removed.
- */
- set_page_dirty(head_page);
- }
- umem_odp->dma_list[idx] = 0;
- umem_odp->npages--;
+ u64 offset = addr - ib_umem_start(umem_odp);
+ size_t idx = offset >> umem_odp->page_shift;
+ unsigned long pfn = umem_odp->map.pfn_list[idx];
+
+ if (!hmm_dma_unmap_pfn(dev->dma_device, &umem_odp->map, idx))
+ goto clear;
+
+ if (pfn & HMM_PFN_WRITE) {
+ struct page *page = hmm_pfn_to_page(pfn);
+ struct page *head_page = compound_head(page);
+ /*
+ * set_page_dirty prefers being called with
+ * the page lock. However, MMU notifiers are
+ * called sometimes with and sometimes without
+ * the lock. We rely on the umem_mutex instead
+ * to prevent other mmu notifiers from
+ * continuing and allowing the page mapping to
+ * be removed.
+ */
+ set_page_dirty(head_page);
}
+ umem_odp->npages--;
+clear:
+ umem_odp->map.pfn_list[idx] &= ~HMM_PFN_FLAGS;
}
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
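
The conversion above replaces the driver-private pfn_list/dma_list pair with the shared struct hmm_dma_map, so allocation and teardown reduce to a single branch on ib_uses_virt_dma(). A condensed sketch of that pairing as the patch uses it (names taken from the diff; kvfree shown since the list comes from kvcalloc):

#include <linux/hmm-dma.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>

static int foo_odp_map_init(struct ib_device *dev, struct hmm_dma_map *map,
			    size_t nr_entries, size_t page_size)
{
	if (ib_uses_virt_dma(dev)) {
		/* Software-DMA devices only need the pfn array */
		map->pfn_list = kvcalloc(nr_entries, sizeof(*map->pfn_list),
					 GFP_KERNEL | __GFP_NOWARN);
		return map->pfn_list ? 0 : -ENOMEM;
	}
	/* Everyone else gets pfn list plus IOVA/DMA state from hmm-dma */
	return hmm_dma_map_alloc(dev->dma_device, map, nr_entries, page_size);
}

static void foo_odp_map_exit(struct ib_device *dev, struct hmm_dma_map *map)
{
	if (ib_uses_virt_dma(dev))
		kvfree(map->pfn_list);
	else
		hmm_dma_map_free(dev->dma_device, map);
}
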
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 3c3bb670c805..bc9fe3ceca4d 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -193,7 +193,7 @@ _ib_uverbs_lookup_comp_file(s32 fd, struct uverbs_attr_bundle *attrs)
fd, attrs);
if (IS_ERR(uobj))
- return (void *)uobj;
+ return ERR_CAST(uobj);
uverbs_uobject_get(uobj);
uobj_put_read(uobj);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index c5e78bbefbd0..75fde0fe9989 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -572,7 +572,7 @@ struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
GFP_KERNEL : GFP_ATOMIC);
if (IS_ERR(slave)) {
rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
- return (void *)slave;
+ return ERR_CAST(slave);
}
ah = _rdma_create_ah(pd, ah_attr, flags, NULL, slave);
rdma_lag_put_ah_roce_slave(slave);
diff --git a/drivers/infiniband/hw/bnxt_re/debugfs.c b/drivers/infiniband/hw/bnxt_re/debugfs.c
index af91d16c3c77..e632f1661b92 100644
--- a/drivers/infiniband/hw/bnxt_re/debugfs.c
+++ b/drivers/infiniband/hw/bnxt_re/debugfs.c
@@ -170,6 +170,9 @@ static int map_cc_config_offset_gen0_ext0(u32 offset, struct bnxt_qplib_cc_param
case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TCP_CP:
*val = ccparam->tcp_cp;
break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INACTIVITY_CP:
+ *val = ccparam->inact_th;
+ break;
default:
return -EINVAL;
}
@@ -203,7 +206,7 @@ static ssize_t bnxt_re_cc_config_get(struct file *filp, char __user *buffer,
return simple_read_from_buffer(buffer, usr_buf_len, ppos, (u8 *)(buf), rc);
}
-static void bnxt_re_fill_gen0_ext0(struct bnxt_qplib_cc_param *ccparam, u32 offset, u32 val)
+static int bnxt_re_fill_gen0_ext0(struct bnxt_qplib_cc_param *ccparam, u32 offset, u32 val)
{
u32 modify_mask;
@@ -247,7 +250,9 @@ static void bnxt_re_fill_gen0_ext0(struct bnxt_qplib_cc_param *ccparam, u32 offs
ccparam->tcp_cp = val;
break;
case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TX_QUEUE:
+ return -EOPNOTSUPP;
case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INACTIVITY_CP:
+ ccparam->inact_th = val;
break;
case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TIME_PER_PHASE:
ccparam->time_pph = val;
@@ -258,17 +263,20 @@ static void bnxt_re_fill_gen0_ext0(struct bnxt_qplib_cc_param *ccparam, u32 offs
}
ccparam->mask = modify_mask;
+ return 0;
}
static int bnxt_re_configure_cc(struct bnxt_re_dev *rdev, u32 gen_ext, u32 offset, u32 val)
{
struct bnxt_qplib_cc_param ccparam = { };
+ int rc;
- /* Supporting only Gen 0 now */
- if (gen_ext == CC_CONFIG_GEN0_EXT0)
- bnxt_re_fill_gen0_ext0(&ccparam, offset, val);
- else
- return -EINVAL;
+ if (gen_ext != CC_CONFIG_GEN0_EXT0)
+ return -EOPNOTSUPP;
+
+ rc = bnxt_re_fill_gen0_ext0(&ccparam, offset, val);
+ if (rc)
+ return rc;
bnxt_qplib_modify_cc(&rdev->qplib_res, &ccparam);
return 0;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 457eecb99f96..be34c605d516 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -1113,7 +1113,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
- if (_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf)
+ if (bnxt_ext_stats_supported(res->cctx, res->dattr->dev_cap_flags, res->is_vf))
qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;
req.qp_flags = cpu_to_le32(qp_flags);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index f231e886ad9d..9efd32a3dc55 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -846,7 +846,12 @@ int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid,
req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
req.resp_addr = cpu_to_le64(sbuf.dma_addr);
- req.function_id = cpu_to_le32(fid);
+ if (bnxt_qplib_is_chip_gen_p7(rcfw->res->cctx) && rcfw->res->is_vf)
+ req.function_id =
+ cpu_to_le32(CMDQ_QUERY_ROCE_STATS_EXT_VF_VALID |
+ (fid << CMDQ_QUERY_ROCE_STATS_EXT_VF_NUM_SFT));
+ else
+ req.function_id = cpu_to_le32(fid);
req.flags = cpu_to_le16(CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_FUNCTION_ID);
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
diff --git a/drivers/infiniband/hw/hfi1/mad.h b/drivers/infiniband/hw/hfi1/mad.h
index b6e3141253c4..d6dde762921a 100644
--- a/drivers/infiniband/hw/hfi1/mad.h
+++ b/drivers/infiniband/hw/hfi1/mad.h
@@ -124,7 +124,6 @@ struct opa_mad_notice_attr {
} __packed ntc_2048;
};
- u8 class_data[];
};
#define IB_VLARB_LOWPRI_0_31 1
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 5a91cbda4aee..764286da2ce8 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -1361,16 +1361,6 @@ void sc_flush(struct send_context *sc)
sc_wait_for_packet_egress(sc, 1);
}
-/* drop all packets on the context, no waiting until they are sent */
-void sc_drop(struct send_context *sc)
-{
- if (!sc)
- return;
-
- dd_dev_info(sc->dd, "%s: context %u(%u) - not implemented\n",
- __func__, sc->sw_index, sc->hw_context);
-}
-
/*
* Start the software reaction to a context halt or SPC freeze:
* - mark the context as halted or frozen
diff --git a/drivers/infiniband/hw/hfi1/pio.h b/drivers/infiniband/hw/hfi1/pio.h
index d07cc6ea7c63..ab0f9a3a8d12 100644
--- a/drivers/infiniband/hw/hfi1/pio.h
+++ b/drivers/infiniband/hw/hfi1/pio.h
@@ -246,7 +246,6 @@ void sc_disable(struct send_context *sc);
int sc_restart(struct send_context *sc);
void sc_return_credits(struct send_context *sc);
void sc_flush(struct send_context *sc);
-void sc_drop(struct send_context *sc);
void sc_stop(struct send_context *sc, int bit);
struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
pio_release_cb cb, void *arg);
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 0d2b39b7c8b5..16a749d16ee9 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -1521,24 +1521,6 @@ void sdma_all_running(struct hfi1_devdata *dd)
}
/**
- * sdma_all_idle() - called when the link goes down
- * @dd: hfi1_devdata
- *
- * This routine moves all engines to the idle state.
- */
-void sdma_all_idle(struct hfi1_devdata *dd)
-{
- struct sdma_engine *sde;
- unsigned int i;
-
- /* idle all engines */
- for (i = 0; i < dd->num_sdma; ++i) {
- sde = &dd->per_sdma[i];
- sdma_process_event(sde, sdma_event_e70_go_idle);
- }
-}
-
-/**
* sdma_start() - called to kick off state processing for all engines
* @dd: hfi1_devdata
*
diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
index d77246b48434..91dfd5d0c419 100644
--- a/drivers/infiniband/hw/hfi1/sdma.h
+++ b/drivers/infiniband/hw/hfi1/sdma.h
@@ -373,7 +373,6 @@ void sdma_start(struct hfi1_devdata *dd);
void sdma_exit(struct hfi1_devdata *dd);
void sdma_clean(struct hfi1_devdata *dd, size_t num_engines);
void sdma_all_running(struct hfi1_devdata *dd);
-void sdma_all_idle(struct hfi1_devdata *dd);
void sdma_freeze_notify(struct hfi1_devdata *dd, int go_idle);
void sdma_freeze(struct hfi1_devdata *dd);
void sdma_unfreeze(struct hfi1_devdata *dd);
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index cf2d29098406..62b4f16dab27 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -53,7 +53,7 @@ int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd,
int ret = 0;
fd->entry_to_rb = kcalloc(uctxt->expected_count,
- sizeof(struct rb_node *),
+ sizeof(*fd->entry_to_rb),
GFP_KERNEL);
if (!fd->entry_to_rb)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
index 7917af8e6380..baf592e6f21b 100644
--- a/drivers/infiniband/hw/hns/Makefile
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -4,6 +4,7 @@
#
ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
+ccflags-y += -I $(src)
hns-roce-hw-v2-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index 4fc5b9d5fea8..307c35888b30 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -33,7 +33,6 @@
#include <linux/pci.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
-#include "hnae3.h"
#include "hns_roce_device.h"
#include "hns_roce_hw_v2.h"
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 560a1d9de408..1dcc9cbb4678 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -1027,6 +1027,26 @@ struct hns_roce_dev {
atomic64_t *dfx_cnt;
};
+enum hns_roce_trace_type {
+ TRACE_SQ,
+ TRACE_RQ,
+ TRACE_SRQ,
+};
+
+static inline const char *trace_type_to_str(enum hns_roce_trace_type type)
+{
+ switch (type) {
+ case TRACE_SQ:
+ return "SQ";
+ case TRACE_RQ:
+ return "RQ";
+ case TRACE_SRQ:
+ return "SRQ";
+ default:
+ return "UNKNOWN";
+ }
+}
+
static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
{
return container_of(ib_dev, struct hns_roce_dev, ib_dev);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 160e8927d364..fa8747656f25 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -43,13 +43,15 @@
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
-#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h"
+#define CREATE_TRACE_POINTS
+#include "hns_roce_trace.h"
+
enum {
CMD_RST_PRC_OTHERS,
CMD_RST_PRC_SUCCESS,
@@ -738,6 +740,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
else
ret = set_ud_wqe(qp, wr, wqe, &sge_idx, owner_bit);
+ trace_hns_sq_wqe(qp->qpn, wqe_idx, wqe, 1 << qp->sq.wqe_shift,
+ wr->wr_id, TRACE_SQ);
if (unlikely(ret)) {
*bad_wr = wr;
goto out;
@@ -807,6 +811,9 @@ static void fill_rq_wqe(struct hns_roce_qp *hr_qp, const struct ib_recv_wr *wr,
wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
fill_recv_sge_to_wqe(wr, wqe, max_sge, hr_qp->rq.rsv_sge);
+
+ trace_hns_rq_wqe(hr_qp->qpn, wqe_idx, wqe, 1 << hr_qp->rq.wqe_shift,
+ wr->wr_id, TRACE_RQ);
}
static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
@@ -943,7 +950,7 @@ static void fill_wqe_idx(struct hns_roce_srq *srq, unsigned int wqe_idx)
static void update_srq_db(struct hns_roce_srq *srq)
{
struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
- struct hns_roce_v2_db db;
+ struct hns_roce_v2_db db = {};
hr_reg_write(&db, DB_TAG, srq->srqn);
hr_reg_write(&db, DB_CMD, HNS_ROCE_V2_SRQ_DB);
@@ -984,6 +991,9 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
fill_recv_sge_to_wqe(wr, wqe, max_sge, srq->rsv_sge);
fill_wqe_idx(srq, wqe_idx);
srq->wrid[wqe_idx] = wr->wr_id;
+
+ trace_hns_srq_wqe(srq->srqn, wqe_idx, wqe, 1 << srq->wqe_shift,
+ wr->wr_id, TRACE_SRQ);
}
if (likely(nreq)) {
@@ -1311,6 +1321,8 @@ static int __hns_roce_cmq_send_one(struct hns_roce_dev *hr_dev,
tail = csq->head;
for (i = 0; i < num; i++) {
+ trace_hns_cmdq_req(hr_dev, &desc[i]);
+
csq->desc[csq->head++] = desc[i];
if (csq->head == csq->desc_num)
csq->head = 0;
@@ -1325,6 +1337,8 @@ static int __hns_roce_cmq_send_one(struct hns_roce_dev *hr_dev,
if (hns_roce_cmq_csq_done(hr_dev)) {
ret = 0;
for (i = 0; i < num; i++) {
+ trace_hns_cmdq_resp(hr_dev, &csq->desc[tail]);
+
/* check the result of hardware write back */
desc_ret = le16_to_cpu(csq->desc[tail++].retval);
if (tail == csq->desc_num)
@@ -4302,8 +4316,7 @@ static inline int get_pdn(struct ib_pd *ib_pd)
}
static void modify_qp_reset_to_init(struct ib_qp *ibqp,
- struct hns_roce_v2_qp_context *context,
- struct hns_roce_v2_qp_context *qpc_mask)
+ struct hns_roce_v2_qp_context *context)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
@@ -5122,7 +5135,7 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
memset(qpc_mask, 0, hr_dev->caps.qpc_sz);
- modify_qp_reset_to_init(ibqp, context, qpc_mask);
+ modify_qp_reset_to_init(ibqp, context);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
modify_qp_init_to_init(ibqp, context, qpc_mask);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
@@ -5313,6 +5326,7 @@ static void v2_set_flushed_fields(struct ib_qp *ibqp,
return;
spin_lock_irqsave(&hr_qp->sq.lock, sq_flag);
+ trace_hns_sq_flush_cqe(hr_qp->qpn, hr_qp->sq.head, TRACE_SQ);
hr_reg_write(context, QPC_SQ_PRODUCER_IDX, hr_qp->sq.head);
hr_reg_clear(qpc_mask, QPC_SQ_PRODUCER_IDX);
hr_qp->state = IB_QPS_ERR;
@@ -5322,6 +5336,7 @@ static void v2_set_flushed_fields(struct ib_qp *ibqp,
return;
spin_lock_irqsave(&hr_qp->rq.lock, rq_flag);
+ trace_hns_rq_flush_cqe(hr_qp->qpn, hr_qp->rq.head, TRACE_RQ);
hr_reg_write(context, QPC_RQ_PRODUCER_IDX, hr_qp->rq.head);
hr_reg_clear(qpc_mask, QPC_RQ_PRODUCER_IDX);
spin_unlock_irqrestore(&hr_qp->rq.lock, rq_flag);
@@ -6248,6 +6263,7 @@ static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
eq->sub_type = sub_type;
++eq->cons_index;
aeqe_found = IRQ_HANDLED;
+ trace_hns_ae_info(event_type, aeqe, eq->eqe_size);
atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_AEQE_CNT]);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 91a5665465ff..bc7466830eaf 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -34,6 +34,7 @@
#define _HNS_ROCE_HW_V2_H
#include <linux/bitops.h>
+#include "hnae3.h"
#define HNS_ROCE_V2_MAX_RC_INL_INN_SZ 32
#define HNS_ROCE_V2_MTT_ENTRY_SZ 64
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 8d0b63d4b50a..e7a497cc125c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -37,7 +37,6 @@
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
-#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 09da3496843b..93a48b41955b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -38,6 +38,7 @@
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
+#include "hns_roce_trace.h"
static u32 hw_index_to_key(int ind)
{
@@ -159,6 +160,7 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
+ trace_hns_mr(mr);
if (mr->type != MR_TYPE_FRMR)
ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr);
else
@@ -1146,6 +1148,7 @@ int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
struct ib_device *ibdev = &hr_dev->ib_dev;
int ret;
+ trace_hns_buf_attr(buf_attr);
/* The caller has its own buffer list and invokes the hns_roce_mtr_map()
* to finish the MTT configuration.
*/
diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c
index 356d98816949..f637b73b946e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_restrack.c
+++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c
@@ -4,7 +4,6 @@
#include <rdma/rdma_cm.h>
#include <rdma/restrack.h>
#include <uapi/rdma/rdma_netlink.h>
-#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hw_v2.h"
diff --git a/drivers/infiniband/hw/hns/hns_roce_trace.h b/drivers/infiniband/hw/hns/hns_roce_trace.h
new file mode 100644
index 000000000000..59ceb591b3a1
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_trace.h
@@ -0,0 +1,216 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2025 Hisilicon Limited.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hns_roce
+
+#if !defined(__HNS_ROCE_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HNS_ROCE_TRACE_H
+
+#include <linux/tracepoint.h>
+#include <linux/string_choices.h>
+#include "hns_roce_device.h"
+#include "hns_roce_hw_v2.h"
+
+DECLARE_EVENT_CLASS(flush_head_template,
+ TP_PROTO(unsigned long qpn, u32 pi,
+ enum hns_roce_trace_type type),
+ TP_ARGS(qpn, pi, type),
+
+ TP_STRUCT__entry(__field(unsigned long, qpn)
+ __field(u32, pi)
+ __field(enum hns_roce_trace_type, type)
+ ),
+
+ TP_fast_assign(__entry->qpn = qpn;
+ __entry->pi = pi;
+ __entry->type = type;
+ ),
+
+ TP_printk("%s 0x%lx flush head 0x%x.",
+ trace_type_to_str(__entry->type),
+ __entry->qpn, __entry->pi)
+);
+
+DEFINE_EVENT(flush_head_template, hns_sq_flush_cqe,
+ TP_PROTO(unsigned long qpn, u32 pi,
+ enum hns_roce_trace_type type),
+ TP_ARGS(qpn, pi, type));
+DEFINE_EVENT(flush_head_template, hns_rq_flush_cqe,
+ TP_PROTO(unsigned long qpn, u32 pi,
+ enum hns_roce_trace_type type),
+ TP_ARGS(qpn, pi, type));
+
+#define MAX_SGE_PER_WQE 64
+#define MAX_WQE_SIZE (MAX_SGE_PER_WQE * HNS_ROCE_SGE_SIZE)
+DECLARE_EVENT_CLASS(wqe_template,
+ TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len,
+ u64 id, enum hns_roce_trace_type type),
+ TP_ARGS(qpn, idx, wqe, len, id, type),
+
+ TP_STRUCT__entry(__field(unsigned long, qpn)
+ __field(u32, idx)
+ __array(u32, wqe,
+ MAX_WQE_SIZE / sizeof(__le32))
+ __field(u32, len)
+ __field(u64, id)
+ __field(enum hns_roce_trace_type, type)
+ ),
+
+ TP_fast_assign(__entry->qpn = qpn;
+ __entry->idx = idx;
+ __entry->id = id;
+ __entry->len = len / sizeof(__le32);
+ __entry->type = type;
+ for (int i = 0; i < __entry->len; i++)
+ __entry->wqe[i] = le32_to_cpu(((__le32 *)wqe)[i]);
+ ),
+
+ TP_printk("%s 0x%lx wqe(0x%x/0x%llx): %s",
+ trace_type_to_str(__entry->type),
+ __entry->qpn, __entry->idx, __entry->id,
+ __print_array(__entry->wqe, __entry->len,
+ sizeof(__le32)))
+);
+
+DEFINE_EVENT(wqe_template, hns_sq_wqe,
+ TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len, u64 id,
+ enum hns_roce_trace_type type),
+ TP_ARGS(qpn, idx, wqe, len, id, type));
+DEFINE_EVENT(wqe_template, hns_rq_wqe,
+ TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len, u64 id,
+ enum hns_roce_trace_type type),
+ TP_ARGS(qpn, idx, wqe, len, id, type));
+DEFINE_EVENT(wqe_template, hns_srq_wqe,
+ TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len, u64 id,
+ enum hns_roce_trace_type type),
+ TP_ARGS(qpn, idx, wqe, len, id, type));
+
+TRACE_EVENT(hns_ae_info,
+ TP_PROTO(int event_type, void *aeqe, unsigned int len),
+ TP_ARGS(event_type, aeqe, len),
+
+ TP_STRUCT__entry(__field(int, event_type)
+ __array(u32, aeqe,
+ HNS_ROCE_V3_EQE_SIZE / sizeof(__le32))
+ __field(u32, len)
+ ),
+
+ TP_fast_assign(__entry->event_type = event_type;
+ __entry->len = len / sizeof(__le32);
+ for (int i = 0; i < __entry->len; i++)
+ __entry->aeqe[i] = le32_to_cpu(((__le32 *)aeqe)[i]);
+ ),
+
+ TP_printk("event %2d aeqe: %s", __entry->event_type,
+ __print_array(__entry->aeqe, __entry->len, sizeof(__le32)))
+);
+
+TRACE_EVENT(hns_mr,
+ TP_PROTO(struct hns_roce_mr *mr),
+ TP_ARGS(mr),
+
+ TP_STRUCT__entry(__field(u64, iova)
+ __field(u64, size)
+ __field(u32, key)
+ __field(u32, pd)
+ __field(u32, pbl_hop_num)
+ __field(u32, npages)
+ __field(int, type)
+ __field(int, enabled)
+ ),
+
+ TP_fast_assign(__entry->iova = mr->iova;
+ __entry->size = mr->size;
+ __entry->key = mr->key;
+ __entry->pd = mr->pd;
+ __entry->pbl_hop_num = mr->pbl_hop_num;
+ __entry->npages = mr->npages;
+ __entry->type = mr->type;
+ __entry->enabled = mr->enabled;
+ ),
+
+ TP_printk("iova:0x%llx, size:%llu, key:%u, pd:%u, pbl_hop:%u, npages:%u, type:%d, status:%d",
+ __entry->iova, __entry->size, __entry->key,
+ __entry->pd, __entry->pbl_hop_num, __entry->npages,
+ __entry->type, __entry->enabled)
+);
+
+TRACE_EVENT(hns_buf_attr,
+ TP_PROTO(struct hns_roce_buf_attr *attr),
+ TP_ARGS(attr),
+
+ TP_STRUCT__entry(__field(unsigned int, region_count)
+ __field(unsigned int, region0_size)
+ __field(int, region0_hopnum)
+ __field(unsigned int, region1_size)
+ __field(int, region1_hopnum)
+ __field(unsigned int, region2_size)
+ __field(int, region2_hopnum)
+ __field(unsigned int, page_shift)
+ __field(bool, mtt_only)
+ ),
+
+ TP_fast_assign(__entry->region_count = attr->region_count;
+ __entry->region0_size = attr->region[0].size;
+ __entry->region0_hopnum = attr->region[0].hopnum;
+ __entry->region1_size = attr->region[1].size;
+ __entry->region1_hopnum = attr->region[1].hopnum;
+ __entry->region2_size = attr->region[2].size;
+ __entry->region2_hopnum = attr->region[2].hopnum;
+ __entry->page_shift = attr->page_shift;
+ __entry->mtt_only = attr->mtt_only;
+ ),
+
+ TP_printk("rg cnt:%u, pg_sft:0x%x, mtt_only:%s, rg 0 (sz:%u, hop:%u), rg 1 (sz:%u, hop:%u), rg 2 (sz:%u, hop:%u)\n",
+ __entry->region_count, __entry->page_shift,
+ str_yes_no(__entry->mtt_only),
+ __entry->region0_size, __entry->region0_hopnum,
+ __entry->region1_size, __entry->region1_hopnum,
+ __entry->region2_size, __entry->region2_hopnum)
+);
+
+DECLARE_EVENT_CLASS(cmdq,
+ TP_PROTO(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc),
+ TP_ARGS(hr_dev, desc),
+
+ TP_STRUCT__entry(__string(dev_name, dev_name(hr_dev->dev))
+ __field(u16, opcode)
+ __field(u16, flag)
+ __field(u16, retval)
+ __array(u32, data, 6)
+ ),
+
+ TP_fast_assign(__assign_str(dev_name);
+ __entry->opcode = le16_to_cpu(desc->opcode);
+ __entry->flag = le16_to_cpu(desc->flag);
+ __entry->retval = le16_to_cpu(desc->retval);
+ for (int i = 0; i < 6; i++)
+ __entry->data[i] = le32_to_cpu(desc->data[i]);
+ ),
+
+ TP_printk("%s cmdq opcode:0x%x, flag:0x%x, retval:0x%x, data:%s\n",
+ __get_str(dev_name), __entry->opcode,
+ __entry->flag, __entry->retval,
+ __print_array(__entry->data, 6, sizeof(__le32)))
+);
+
+DEFINE_EVENT(cmdq, hns_cmdq_req,
+ TP_PROTO(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc),
+ TP_ARGS(hr_dev, desc));
+DEFINE_EVENT(cmdq, hns_cmdq_resp,
+ TP_PROTO(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc),
+ TP_ARGS(hr_dev, desc));
+
+#endif /* __HNS_ROCE_TRACE_H */
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE hns_roce_trace
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
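
The new header follows the standard self-describing tracepoint layout: exactly one translation unit defines CREATE_TRACE_POINTS before including it (hns_roce_hw_v2.c in the hunk above), which is what emits the trace_*() bodies; every other user includes it plainly, and TRACE_INCLUDE_PATH "." is why the Makefile hunk adds -I $(src). Usage, as the diff itself shows:

/* hns_roce_hw_v2.c: emits the tracepoint definitions, exactly once */
#define CREATE_TRACE_POINTS
#include "hns_roce_trace.h"

/* hns_roce_mr.c and other users: plain include, no define */
#include "hns_roce_trace.h"

/* Call sites then use the generated trace_<event>() stubs: */
trace_hns_sq_wqe(qp->qpn, wqe_idx, wqe, 1 << qp->sq.wqe_shift,
		 wr->wr_id, TRACE_SQ);
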
diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c
index 6aed6169c07d..99a7f1a6c0b5 100644
--- a/drivers/infiniband/hw/irdma/ctrl.c
+++ b/drivers/infiniband/hw/irdma/ctrl.c
@@ -3131,7 +3131,7 @@ int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
writel(0, cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
ibdev_dbg(to_ibdev(cqp->dev),
- "WQE: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%pK] cqp[%p] polarity[x%04x]\n",
+ "WQE: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%p] cqp[%p] polarity[x%04x]\n",
cqp->sq_size, cqp->hw_sq_size, cqp->sq_base,
(u64 *)(uintptr_t)cqp->sq_pa, cqp, cqp->polarity);
return 0;
diff --git a/drivers/infiniband/hw/irdma/pble.c b/drivers/infiniband/hw/irdma/pble.c
index e7ce6840755f..37ce35cb10e7 100644
--- a/drivers/infiniband/hw/irdma/pble.c
+++ b/drivers/infiniband/hw/irdma/pble.c
@@ -108,7 +108,7 @@ static int add_sd_direct(struct irdma_hmc_pble_rsrc *pble_rsrc,
chunk->vaddr = sd_entry->u.bp.addr.va + offset;
chunk->fpm_addr = pble_rsrc->next_fpm_addr;
ibdev_dbg(to_ibdev(dev),
- "PBLE: chunk_size[%lld] = 0x%llx vaddr=0x%pK fpm_addr = %llx\n",
+ "PBLE: chunk_size[%lld] = 0x%llx vaddr=0x%p fpm_addr = %llx\n",
chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr);
return 0;
diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c
index 0fc4e2679218..28e154bbb50f 100644
--- a/drivers/infiniband/hw/mana/cq.c
+++ b/drivers/infiniband/hw/mana/cq.c
@@ -15,14 +15,12 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_device *ibdev = ibcq->device;
struct mana_ib_create_cq ucmd = {};
struct mana_ib_dev *mdev;
- struct gdma_context *gc;
bool is_rnic_cq;
u32 doorbell;
u32 buf_size;
int err;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
- gc = mdev_to_gc(mdev);
cq->comp_vector = attr->comp_vector % ibdev->num_comp_vectors;
cq->cq_handle = INVALID_MANA_HANDLE;
@@ -65,7 +63,7 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
ibdev_dbg(ibdev, "Failed to create kernel queue for create cq, %d\n", err);
return err;
}
- doorbell = gc->mana_ib.doorbell;
+ doorbell = mdev->gdma_dev->doorbell;
}
if (is_rnic_cq) {
diff --git a/drivers/infiniband/hw/mana/device.c b/drivers/infiniband/hw/mana/device.c
index b31089320aa5..165c0a1e67d1 100644
--- a/drivers/infiniband/hw/mana/device.c
+++ b/drivers/infiniband/hw/mana/device.c
@@ -101,103 +101,95 @@ static int mana_ib_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
struct mana_adev *madev = container_of(adev, struct mana_adev, adev);
+ struct gdma_context *gc = madev->mdev->gdma_context;
+ struct mana_context *mc = gc->mana.driver_data;
struct gdma_dev *mdev = madev->mdev;
struct net_device *ndev;
- struct mana_context *mc;
struct mana_ib_dev *dev;
u8 mac_addr[ETH_ALEN];
int ret;
- mc = mdev->driver_data;
-
dev = ib_alloc_device(mana_ib_dev, ib_dev);
if (!dev)
return -ENOMEM;
ib_set_device_ops(&dev->ib_dev, &mana_ib_dev_ops);
-
- dev->ib_dev.phys_port_cnt = mc->num_ports;
-
- ibdev_dbg(&dev->ib_dev, "mdev=%p id=%d num_ports=%d\n", mdev,
- mdev->dev_id.as_uint32, dev->ib_dev.phys_port_cnt);
-
dev->ib_dev.node_type = RDMA_NODE_IB_CA;
-
- /*
- * num_comp_vectors needs to set to the max MSIX index
- * when interrupts and event queues are implemented
- */
- dev->ib_dev.num_comp_vectors = mdev->gdma_context->max_num_queues;
- dev->ib_dev.dev.parent = mdev->gdma_context->dev;
-
- ndev = mana_get_primary_netdev(mc, 0, &dev->dev_tracker);
- if (!ndev) {
- ret = -ENODEV;
- ibdev_err(&dev->ib_dev, "Failed to get netdev for IB port 1");
- goto free_ib_device;
- }
- ether_addr_copy(mac_addr, ndev->dev_addr);
- addrconf_addr_eui48((u8 *)&dev->ib_dev.node_guid, ndev->dev_addr);
- ret = ib_device_set_netdev(&dev->ib_dev, ndev, 1);
- /* mana_get_primary_netdev() returns ndev with refcount held */
- netdev_put(ndev, &dev->dev_tracker);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to set ib netdev, ret %d", ret);
- goto free_ib_device;
- }
-
- ret = mana_gd_register_device(&mdev->gdma_context->mana_ib);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to register device, ret %d",
- ret);
- goto free_ib_device;
- }
- dev->gdma_dev = &mdev->gdma_context->mana_ib;
-
- dev->nb.notifier_call = mana_ib_netdev_event;
- ret = register_netdevice_notifier(&dev->nb);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to register net notifier, %d",
- ret);
- goto deregister_device;
- }
-
- ret = mana_ib_gd_query_adapter_caps(dev);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to query device caps, ret %d",
- ret);
- goto deregister_net_notifier;
- }
-
- ib_set_device_ops(&dev->ib_dev, &mana_ib_stats_ops);
-
- ret = mana_ib_create_eqs(dev);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to create EQs, ret %d", ret);
- goto deregister_net_notifier;
- }
-
- ret = mana_ib_gd_create_rnic_adapter(dev);
- if (ret)
- goto destroy_eqs;
-
+ dev->ib_dev.num_comp_vectors = gc->max_num_queues;
+ dev->ib_dev.dev.parent = gc->dev;
+ dev->gdma_dev = mdev;
xa_init_flags(&dev->qp_table_wq, XA_FLAGS_LOCK_IRQ);
- ret = mana_ib_gd_config_mac(dev, ADDR_OP_ADD, mac_addr);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to add Mac address, ret %d",
- ret);
- goto destroy_rnic;
+
+ if (mana_ib_is_rnic(dev)) {
+ dev->ib_dev.phys_port_cnt = 1;
+ ndev = mana_get_primary_netdev(mc, 0, &dev->dev_tracker);
+ if (!ndev) {
+ ret = -ENODEV;
+ ibdev_err(&dev->ib_dev, "Failed to get netdev for IB port 1");
+ goto free_ib_device;
+ }
+ ether_addr_copy(mac_addr, ndev->dev_addr);
+ addrconf_addr_eui48((u8 *)&dev->ib_dev.node_guid, ndev->dev_addr);
+ ret = ib_device_set_netdev(&dev->ib_dev, ndev, 1);
+ /* mana_get_primary_netdev() returns ndev with refcount held */
+ netdev_put(ndev, &dev->dev_tracker);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to set ib netdev, ret %d", ret);
+ goto free_ib_device;
+ }
+
+ dev->nb.notifier_call = mana_ib_netdev_event;
+ ret = register_netdevice_notifier(&dev->nb);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to register net notifier, %d",
+ ret);
+ goto free_ib_device;
+ }
+
+ ret = mana_ib_gd_query_adapter_caps(dev);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to query device caps, ret %d", ret);
+ goto deregister_net_notifier;
+ }
+
+ ib_set_device_ops(&dev->ib_dev, &mana_ib_stats_ops);
+
+ ret = mana_ib_create_eqs(dev);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to create EQs, ret %d", ret);
+ goto deregister_net_notifier;
+ }
+
+ ret = mana_ib_gd_create_rnic_adapter(dev);
+ if (ret)
+ goto destroy_eqs;
+
+ ret = mana_ib_gd_config_mac(dev, ADDR_OP_ADD, mac_addr);
+ if (ret) {
+		ibdev_err(&dev->ib_dev, "Failed to add MAC address, ret %d", ret);
+ goto destroy_rnic;
+ }
+ } else {
+ dev->ib_dev.phys_port_cnt = mc->num_ports;
+ ret = mana_eth_query_adapter_caps(dev);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to query ETH device caps, ret %d", ret);
+ goto free_ib_device;
+ }
}
- dev->av_pool = dma_pool_create("mana_ib_av", mdev->gdma_context->dev,
- MANA_AV_BUFFER_SIZE, MANA_AV_BUFFER_SIZE, 0);
+ dev->av_pool = dma_pool_create("mana_ib_av", gc->dev, MANA_AV_BUFFER_SIZE,
+ MANA_AV_BUFFER_SIZE, 0);
if (!dev->av_pool) {
ret = -ENOMEM;
goto destroy_rnic;
}
- ret = ib_register_device(&dev->ib_dev, "mana_%d",
- mdev->gdma_context->dev);
+ ibdev_dbg(&dev->ib_dev, "mdev=%p id=%d num_ports=%d\n", mdev,
+ mdev->dev_id.as_uint32, dev->ib_dev.phys_port_cnt);
+
+ ret = ib_register_device(&dev->ib_dev, mana_ib_is_rnic(dev) ? "mana_%d" : "manae_%d",
+ gc->dev);
if (ret)
goto deallocate_pool;
@@ -208,15 +200,16 @@ static int mana_ib_probe(struct auxiliary_device *adev,
deallocate_pool:
dma_pool_destroy(dev->av_pool);
destroy_rnic:
- xa_destroy(&dev->qp_table_wq);
- mana_ib_gd_destroy_rnic_adapter(dev);
+ if (mana_ib_is_rnic(dev))
+ mana_ib_gd_destroy_rnic_adapter(dev);
destroy_eqs:
- mana_ib_destroy_eqs(dev);
+ if (mana_ib_is_rnic(dev))
+ mana_ib_destroy_eqs(dev);
deregister_net_notifier:
- unregister_netdevice_notifier(&dev->nb);
-deregister_device:
- mana_gd_deregister_device(dev->gdma_dev);
+ if (mana_ib_is_rnic(dev))
+ unregister_netdevice_notifier(&dev->nb);
free_ib_device:
+ xa_destroy(&dev->qp_table_wq);
ib_dealloc_device(&dev->ib_dev);
return ret;
}
@@ -227,25 +220,24 @@ static void mana_ib_remove(struct auxiliary_device *adev)
ib_unregister_device(&dev->ib_dev);
dma_pool_destroy(dev->av_pool);
+ if (mana_ib_is_rnic(dev)) {
+ mana_ib_gd_destroy_rnic_adapter(dev);
+ mana_ib_destroy_eqs(dev);
+ unregister_netdevice_notifier(&dev->nb);
+ }
xa_destroy(&dev->qp_table_wq);
- mana_ib_gd_destroy_rnic_adapter(dev);
- mana_ib_destroy_eqs(dev);
- unregister_netdevice_notifier(&dev->nb);
- mana_gd_deregister_device(dev->gdma_dev);
ib_dealloc_device(&dev->ib_dev);
}
static const struct auxiliary_device_id mana_id_table[] = {
- {
- .name = "mana.rdma",
- },
+ { .name = "mana.rdma", },
+ { .name = "mana.eth", },
{},
};
MODULE_DEVICE_TABLE(auxiliary, mana_id_table);
static struct auxiliary_driver mana_driver = {
- .name = "rdma",
.probe = mana_ib_probe,
.remove = mana_ib_remove,
.id_table = mana_id_table,
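The id table above lets one driver bind to both auxiliary devices the MANA core exposes; with .name left unset, the auxiliary bus derives the driver name from the module name. Matching on this bus is a plain string compare of "<module>.<device>" against the id table. A conceptual sketch, not driver code:

	/* Sketch of auxiliary-bus matching; names are illustrative.
	 * The device name, e.g. "mana.rdma" or "mana.eth", must equal
	 * an id_table entry for probe to run. */
	static bool aux_id_matches(const char *devname,
				   const struct auxiliary_device_id *id)
	{
		return !strcmp(devname, id->name);
	}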
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index eda9c5b971de..41a24a186f9d 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -4,6 +4,7 @@
*/
#include "mana_ib.h"
+#include "linux/pci.h"
void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
u32 port)
@@ -243,7 +244,6 @@ void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
int mana_ib_create_kernel_queue(struct mana_ib_dev *mdev, u32 size, enum gdma_queue_type type,
struct mana_ib_queue *queue)
{
- struct gdma_context *gc = mdev_to_gc(mdev);
struct gdma_queue_spec spec = {};
int err;
@@ -252,7 +252,7 @@ int mana_ib_create_kernel_queue(struct mana_ib_dev *mdev, u32 size, enum gdma_qu
spec.type = type;
spec.monitor_avl_buf = false;
spec.queue_size = size;
- err = mana_gd_create_mana_wq_cq(&gc->mana_ib, &spec, &queue->kmem);
+ err = mana_gd_create_mana_wq_cq(mdev->gdma_dev, &spec, &queue->kmem);
if (err)
return err;
/* take ownership into mana_ib from mana */
@@ -479,7 +479,7 @@ int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
{
unsigned long page_sz;
- page_sz = ib_umem_find_best_pgsz(umem, PAGE_SZ_BM, virt);
+ page_sz = ib_umem_find_best_pgsz(umem, dev->adapter_caps.page_size_cap, virt);
if (!page_sz) {
ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
return -EINVAL;
@@ -494,7 +494,7 @@ int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_ume
unsigned long page_sz;
/* Hardware requires dma region to align to chosen page size */
- page_sz = ib_umem_find_best_pgoff(umem, PAGE_SZ_BM, 0);
+ page_sz = ib_umem_find_best_pgoff(umem, dev->adapter_caps.page_size_cap, 0);
if (!page_sz) {
ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
return -EINVAL;
@@ -551,6 +551,7 @@ int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
+ struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
struct ib_port_attr attr;
int err;
@@ -560,10 +561,12 @@ int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
immutable->pkey_tbl_len = attr.pkey_tbl_len;
immutable->gid_tbl_len = attr.gid_tbl_len;
- immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
- if (port_num == 1) {
- immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+
+ if (mana_ib_is_rnic(dev)) {
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ } else {
+ immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
}
return 0;
@@ -572,12 +575,14 @@ int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
struct ib_udata *uhw)
{
- struct mana_ib_dev *dev = container_of(ibdev,
- struct mana_ib_dev, ib_dev);
+ struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+ struct pci_dev *pdev = to_pci_dev(mdev_to_gc(dev)->dev);
memset(props, 0, sizeof(*props));
+ props->vendor_id = pdev->vendor;
+ props->vendor_part_id = dev->gdma_dev->dev_id.type;
props->max_mr_size = MANA_IB_MAX_MR_SIZE;
- props->page_size_cap = PAGE_SZ_BM;
+ props->page_size_cap = dev->adapter_caps.page_size_cap;
props->max_qp = dev->adapter_caps.max_qp_count;
props->max_qp_wr = dev->adapter_caps.max_qp_wr;
props->device_cap_flags = IB_DEVICE_RC_RNR_NAK_GEN;
@@ -596,6 +601,8 @@ int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
props->max_ah = INT_MAX;
props->max_pkeys = 1;
props->local_ca_ack_delay = MANA_CA_ACK_DELAY;
+ if (!mana_ib_is_rnic(dev))
+ props->raw_packet_caps = IB_RAW_PACKET_CAP_IP_CSUM;
return 0;
}
@@ -603,6 +610,7 @@ int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
int mana_ib_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
+ struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
struct net_device *ndev = mana_ib_get_netdev(ibdev, port);
if (!ndev)
@@ -623,7 +631,7 @@ int mana_ib_query_port(struct ib_device *ibdev, u32 port,
props->active_width = IB_WIDTH_4X;
props->active_speed = IB_SPEED_EDR;
props->pkey_tbl_len = 1;
- if (port == 1) {
+ if (mana_ib_is_rnic(dev)) {
props->gid_tbl_len = 16;
props->port_cap_flags = IB_PORT_CM_SUP;
props->ip_gids = true;
@@ -696,6 +704,41 @@ int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *dev)
caps->max_recv_sge_count = resp.max_recv_sge_count;
caps->feature_flags = resp.feature_flags;
+ caps->page_size_cap = PAGE_SZ_BM;
+ if (mdev_to_gc(dev)->pf_cap_flags1 & GDMA_DRV_CAP_FLAG_1_GDMA_PAGES_4MB_1GB_2GB)
+ caps->page_size_cap |= (SZ_4M | SZ_1G | SZ_2G);
+
+ return 0;
+}
+
+int mana_eth_query_adapter_caps(struct mana_ib_dev *dev)
+{
+ struct mana_ib_adapter_caps *caps = &dev->adapter_caps;
+ struct gdma_query_max_resources_resp resp = {};
+ struct gdma_general_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
+ sizeof(req), sizeof(resp));
+
+ err = mana_gd_send_request(mdev_to_gc(dev), sizeof(req), &req, sizeof(resp), &resp);
+ if (err) {
+ ibdev_err(&dev->ib_dev,
+ "Failed to query adapter caps err %d", err);
+ return err;
+ }
+
+ caps->max_qp_count = min_t(u32, resp.max_sq, resp.max_rq);
+ caps->max_cq_count = resp.max_cq;
+ caps->max_mr_count = resp.max_mst;
+ caps->max_pd_count = 0x6000;
+ caps->max_qp_wr = min_t(u32,
+ 0x100000 / GDMA_MAX_SQE_SIZE,
+ 0x100000 / GDMA_MAX_RQE_SIZE);
+ caps->max_send_sge_count = 30;
+ caps->max_recv_sge_count = 15;
+ caps->page_size_cap = PAGE_SZ_BM;
+
return 0;
}
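Both caps paths now publish page_size_cap as a bitmap with one bit per supported page size: the RNIC path widens the default PAGE_SZ_BM when the large-page capability flag is present, while the ETH path keeps the default. A minimal sketch of how such a bitmap is built and consumed (the capability check below is an illustrative stand-in):

	u64 cap = PAGE_SZ_BM;			/* baseline 4K..2M bits */

	if (hw_has_large_pages)			/* assumed capability bit */
		cap |= SZ_4M | SZ_1G | SZ_2G;

	/* ib_umem_find_best_pgsz(umem, cap, virt) later returns the
	 * largest set bit compatible with the umem's address layout. */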
@@ -740,7 +783,7 @@ int mana_ib_create_eqs(struct mana_ib_dev *mdev)
spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
spec.eq.msix_index = 0;
- err = mana_gd_create_mana_eq(&gc->mana_ib, &spec, &mdev->fatal_err_eq);
+ err = mana_gd_create_mana_eq(mdev->gdma_dev, &spec, &mdev->fatal_err_eq);
if (err)
return err;
@@ -791,7 +834,7 @@ int mana_ib_gd_create_rnic_adapter(struct mana_ib_dev *mdev)
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_ADAPTER, sizeof(req), sizeof(resp));
req.hdr.req.msg_version = GDMA_MESSAGE_V2;
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.notify_eq_id = mdev->fatal_err_eq->id;
if (mdev->adapter_caps.feature_flags & MANA_IB_FEATURE_CLIENT_ERROR_CQE_SUPPORT)
@@ -816,7 +859,7 @@ int mana_ib_gd_destroy_rnic_adapter(struct mana_ib_dev *mdev)
gc = mdev_to_gc(mdev);
mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_ADAPTER, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
@@ -843,7 +886,7 @@ int mana_ib_gd_add_gid(const struct ib_gid_attr *attr, void **context)
}
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_IP_ADDR, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.op = ADDR_OP_ADD;
req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
@@ -873,7 +916,7 @@ int mana_ib_gd_del_gid(const struct ib_gid_attr *attr, void **context)
}
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_IP_ADDR, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.op = ADDR_OP_REMOVE;
req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
@@ -896,7 +939,7 @@ int mana_ib_gd_config_mac(struct mana_ib_dev *mdev, enum mana_ib_addr_op op, u8
int err;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_MAC_ADDR, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.op = op;
copy_in_reverse(req.mac_addr, mac, ETH_ALEN);
@@ -917,8 +960,11 @@ int mana_ib_gd_create_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq, u32 do
struct mana_rnic_create_cq_req req = {};
int err;
+ if (!mdev->eqs)
+ return -EINVAL;
+
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_CQ, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.gdma_region = cq->queue.gdma_region;
req.eq_id = mdev->eqs[cq->comp_vector]->id;
@@ -950,7 +996,7 @@ int mana_ib_gd_destroy_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
return 0;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_CQ, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.cq_handle = cq->cq_handle;
@@ -976,7 +1022,7 @@ int mana_ib_gd_create_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
int err, i;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_RC_QP, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.pd_handle = pd->pd_handle;
req.send_cq_handle = send_cq->cq_handle;
@@ -1012,7 +1058,7 @@ int mana_ib_gd_destroy_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
int err;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_RC_QP, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.rc_qp_handle = qp->qp_handle;
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
@@ -1035,7 +1081,7 @@ int mana_ib_gd_create_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
int err, i;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_UD_QP, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.pd_handle = pd->pd_handle;
req.send_cq_handle = send_cq->cq_handle;
@@ -1070,7 +1116,7 @@ int mana_ib_gd_destroy_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
int err;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_UD_QP, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.qp_handle = qp->qp_handle;
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index 6903946677e5..42bebd6cd4f7 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -60,6 +60,7 @@ struct mana_ib_adapter_caps {
u32 max_recv_sge_count;
u32 max_inline_data_size;
u64 feature_flags;
+ u64 page_size_cap;
};
struct mana_ib_queue {
@@ -543,6 +544,11 @@ static inline void mana_put_qp_ref(struct mana_ib_qp *qp)
complete(&qp->free);
}
+static inline bool mana_ib_is_rnic(struct mana_ib_dev *mdev)
+{
+ return mdev->gdma_dev->dev_id.type == GDMA_DEVICE_MANA_IB;
+}
+
static inline struct net_device *mana_ib_get_netdev(struct ib_device *ibdev, u32 port)
{
struct mana_ib_dev *mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
@@ -642,6 +648,7 @@ int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext);
int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *mdev);
+int mana_eth_query_adapter_caps(struct mana_ib_dev *mdev);
int mana_ib_create_eqs(struct mana_ib_dev *mdev);
diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c
index f99557ec7767..6d974d0a8400 100644
--- a/drivers/infiniband/hw/mana/mr.c
+++ b/drivers/infiniband/hw/mana/mr.c
@@ -5,8 +5,8 @@
#include "mana_ib.h"
-#define VALID_MR_FLAGS \
- (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ)
+#define VALID_MR_FLAGS (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |\
+ IB_ACCESS_REMOTE_ATOMIC | IB_ZERO_BASED)
#define VALID_DMA_MR_FLAGS (IB_ACCESS_LOCAL_WRITE)
@@ -24,6 +24,9 @@ mana_ib_verbs_to_gdma_access_flags(int access_flags)
if (access_flags & IB_ACCESS_REMOTE_READ)
flags |= GDMA_ACCESS_FLAG_REMOTE_READ;
+ if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
+ flags |= GDMA_ACCESS_FLAG_REMOTE_ATOMIC;
+
return flags;
}
@@ -48,7 +51,10 @@ static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr,
req.gva.virtual_address = mr_params->gva.virtual_address;
req.gva.access_flags = mr_params->gva.access_flags;
break;
-
+ case GDMA_MR_TYPE_ZBVA:
+ req.zbva.dma_region_handle = mr_params->zbva.dma_region_handle;
+ req.zbva.access_flags = mr_params->zbva.access_flags;
+ break;
default:
ibdev_dbg(&dev->ib_dev,
"invalid param (GDMA_MR_TYPE) passed, type %d\n",
@@ -144,11 +150,18 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
dma_region_handle);
mr_params.pd_handle = pd->pd_handle;
- mr_params.mr_type = GDMA_MR_TYPE_GVA;
- mr_params.gva.dma_region_handle = dma_region_handle;
- mr_params.gva.virtual_address = iova;
- mr_params.gva.access_flags =
- mana_ib_verbs_to_gdma_access_flags(access_flags);
+ if (access_flags & IB_ZERO_BASED) {
+ mr_params.mr_type = GDMA_MR_TYPE_ZBVA;
+ mr_params.zbva.dma_region_handle = dma_region_handle;
+ mr_params.zbva.access_flags =
+ mana_ib_verbs_to_gdma_access_flags(access_flags);
+ } else {
+ mr_params.mr_type = GDMA_MR_TYPE_GVA;
+ mr_params.gva.dma_region_handle = dma_region_handle;
+ mr_params.gva.virtual_address = iova;
+ mr_params.gva.access_flags =
+ mana_ib_verbs_to_gdma_access_flags(access_flags);
+ }
err = mana_ib_gd_create_mr(dev, mr, &mr_params);
if (err)
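The IB_ZERO_BASED branch maps to GDMA_MR_TYPE_ZBVA, where the region carries no virtual address and peers address it by offset. From userspace this corresponds to the standard rdma-core flag; a minimal sketch using libibverbs, not this driver's code:

	#include <infiniband/verbs.h>

	struct ibv_mr *mr = ibv_reg_mr(pd, buf, len,
				       IBV_ACCESS_LOCAL_WRITE |
				       IBV_ACCESS_REMOTE_WRITE |
				       IBV_ACCESS_ZERO_BASED);
	/* With IBV_ACCESS_ZERO_BASED, remote accesses use offsets
	 * 0..len-1 instead of virtual addresses. */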
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index c928af58f38b..14fd7d6c54a2 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -635,7 +635,6 @@ static int mana_ib_create_ud_qp(struct ib_qp *ibqp, struct ib_pd *ibpd,
{
struct mana_ib_dev *mdev = container_of(ibpd->device, struct mana_ib_dev, ib_dev);
struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
- struct gdma_context *gc = mdev_to_gc(mdev);
u32 doorbell, queue_size;
int i, err;
@@ -654,7 +653,7 @@ static int mana_ib_create_ud_qp(struct ib_qp *ibqp, struct ib_pd *ibpd,
goto destroy_queues;
}
}
- doorbell = gc->mana_ib.doorbell;
+ doorbell = mdev->gdma_dev->doorbell;
err = create_shadow_queue(&qp->shadow_rq, attr->cap.max_recv_wr,
sizeof(struct ud_rq_shadow_wqe));
@@ -736,7 +735,7 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int err;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_SET_QP_STATE, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.qp_handle = qp->qp_handle;
req.qp_state = attr->qp_state;
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index 33f525b744f2..e279e69b9a51 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -43,7 +43,7 @@
#define MAX_VFS 80
#define MAX_PEND_REQS_PER_FUNC 4
-#define MAD_TIMEOUT_MS 2000
+#define MAD_TIMEOUT_SEC 2
#define mcg_warn(fmt, arg...) pr_warn("MCG WARNING: " fmt, ##arg)
#define mcg_error(fmt, arg...) pr_err(fmt, ##arg)
@@ -270,7 +270,7 @@ static int send_join_to_wire(struct mcast_group *group, struct ib_sa_mad *sa_mad
if (!ret) {
/* calls mlx4_ib_mcg_timeout_handler */
queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
- msecs_to_jiffies(MAD_TIMEOUT_MS));
+ secs_to_jiffies(MAD_TIMEOUT_SEC));
}
return ret;
@@ -309,7 +309,7 @@ static int send_leave_to_wire(struct mcast_group *group, u8 join_state)
if (!ret) {
/* calls mlx4_ib_mcg_timeout_handler */
queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
- msecs_to_jiffies(MAD_TIMEOUT_MS));
+ secs_to_jiffies(MAD_TIMEOUT_SEC));
}
return ret;
@@ -1091,7 +1091,7 @@ static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy
for (i = 0; i < MAX_VFS; ++i)
clean_vf_mcast(ctx, i);
- end = jiffies + msecs_to_jiffies(MAD_TIMEOUT_MS + 3000);
+ end = jiffies + secs_to_jiffies(MAD_TIMEOUT_SEC + 3);
do {
count = 0;
mutex_lock(&ctx->mcg_table_lock);
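These conversions are mechanical: secs_to_jiffies(n) is equivalent to msecs_to_jiffies(n * 1000) but states the unit directly and drops the hand-written multiplication. For instance:

	#include <linux/jiffies.h>

	unsigned long a = msecs_to_jiffies(2000);	/* old form */
	unsigned long b = secs_to_jiffies(2);		/* new form */
	/* a == b; the timeout is two seconds either way. */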
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index 0ff9f18a71e8..680627f1de33 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -1645,11 +1645,6 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0, NULL);
}
-enum {
- LEFTOVERS_MC,
- LEFTOVERS_UC,
-};
-
static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev,
struct mlx5_ib_flow_prio *ft_prio,
struct ib_flow_attr *flow_attr,
@@ -1659,43 +1654,32 @@ static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *de
struct mlx5_ib_flow_handler *handler = NULL;
static struct {
- struct ib_flow_attr flow_attr;
struct ib_flow_spec_eth eth_flow;
- } leftovers_specs[] = {
- [LEFTOVERS_MC] = {
- .flow_attr = {
- .num_of_specs = 1,
- .size = sizeof(leftovers_specs[0])
- },
- .eth_flow = {
- .type = IB_FLOW_SPEC_ETH,
- .size = sizeof(struct ib_flow_spec_eth),
- .mask = {.dst_mac = {0x1} },
- .val = {.dst_mac = {0x1} }
- }
- },
- [LEFTOVERS_UC] = {
- .flow_attr = {
- .num_of_specs = 1,
- .size = sizeof(leftovers_specs[0])
- },
- .eth_flow = {
- .type = IB_FLOW_SPEC_ETH,
- .size = sizeof(struct ib_flow_spec_eth),
- .mask = {.dst_mac = {0x1} },
- .val = {.dst_mac = {} }
- }
- }
- };
+ struct ib_flow_attr flow_attr;
+ } leftovers_wc = { .flow_attr = { .num_of_specs = 1,
+ .size = sizeof(leftovers_wc) },
+ .eth_flow = {
+ .type = IB_FLOW_SPEC_ETH,
+ .size = sizeof(struct ib_flow_spec_eth),
+ .mask = { .dst_mac = { 0x1 } },
+ .val = { .dst_mac = { 0x1 } } } };
- handler = create_flow_rule(dev, ft_prio,
- &leftovers_specs[LEFTOVERS_MC].flow_attr,
- dst);
+ static struct {
+ struct ib_flow_spec_eth eth_flow;
+ struct ib_flow_attr flow_attr;
+ } leftovers_uc = { .flow_attr = { .num_of_specs = 1,
+ .size = sizeof(leftovers_uc) },
+ .eth_flow = {
+ .type = IB_FLOW_SPEC_ETH,
+ .size = sizeof(struct ib_flow_spec_eth),
+ .mask = { .dst_mac = { 0x1 } },
+ .val = { .dst_mac = {} } } };
+
+ handler = create_flow_rule(dev, ft_prio, &leftovers_wc.flow_attr, dst);
if (!IS_ERR(handler) &&
flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) {
handler_ucast = create_flow_rule(dev, ft_prio,
- &leftovers_specs[LEFTOVERS_UC].flow_attr,
- dst);
+ &leftovers_uc.flow_attr, dst);
if (IS_ERR(handler_ucast)) {
mlx5_del_flow_rules(handler->rule);
ft_prio->refcount--;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index d07cacaa0abd..ce7610740412 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -485,6 +485,10 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed,
*active_width = IB_WIDTH_2X;
*active_speed = IB_SPEED_NDR;
break;
+ case MLX5E_PROT_MASK(MLX5E_200GAUI_1_200GBASE_CR1_KR1):
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_XDR;
+ break;
case MLX5E_PROT_MASK(MLX5E_400GAUI_8_400GBASE_CR8):
*active_width = IB_WIDTH_8X;
*active_speed = IB_SPEED_HDR;
@@ -493,10 +497,18 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed,
*active_width = IB_WIDTH_4X;
*active_speed = IB_SPEED_NDR;
break;
+ case MLX5E_PROT_MASK(MLX5E_400GAUI_2_400GBASE_CR2_KR2):
+ *active_width = IB_WIDTH_2X;
+ *active_speed = IB_SPEED_XDR;
+ break;
case MLX5E_PROT_MASK(MLX5E_800GAUI_8_800GBASE_CR8_KR8):
*active_width = IB_WIDTH_8X;
*active_speed = IB_SPEED_NDR;
break;
+ case MLX5E_PROT_MASK(MLX5E_800GAUI_4_800GBASE_CR4_KR4):
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_XDR;
+ break;
default:
return -EINVAL;
}
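The new cases cover link modes built from 200 Gb/s lanes: one lane gives 200G, two give 400G, four give 800G, all reported as IB_SPEED_XDR at the matching width. A sketch of the underlying arithmetic, using nominal per-lane rates (illustrative figures, not driver constants):

	/* Nominal per-lane rates in Gb/s; sketch only. */
	static int per_lane_gbps(u16 speed)
	{
		switch (speed) {
		case IB_SPEED_EDR: return 25;
		case IB_SPEED_HDR: return 50;
		case IB_SPEED_NDR: return 100;
		case IB_SPEED_XDR: return 200;
		default: return 0;
		}
	}
	/* 200GAUI-1: 1 x 200 = 200G, 400GAUI-2: 2 x 200 = 400G,
	 * 800GAUI-4: 4 x 200 = 800G. */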
@@ -4422,17 +4434,6 @@ static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
mlx5_core_native_port_num(dev->mdev) - 1);
}
-static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev)
-{
- dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
- return PTR_ERR_OR_ZERO(dev->mdev->priv.uar);
-}
-
-static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
-{
- mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
-}
-
static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
{
int err;
@@ -4662,9 +4663,6 @@ static const struct mlx5_ib_profile pf_profile = {
STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
mlx5_ib_stage_cong_debugfs_init,
mlx5_ib_stage_cong_debugfs_cleanup),
- STAGE_CREATE(MLX5_IB_STAGE_UAR,
- mlx5_ib_stage_uar_init,
- mlx5_ib_stage_uar_cleanup),
STAGE_CREATE(MLX5_IB_STAGE_BFREG,
mlx5_ib_stage_bfrag_init,
mlx5_ib_stage_bfrag_cleanup),
@@ -4722,9 +4720,6 @@ const struct mlx5_ib_profile raw_eth_profile = {
STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
mlx5_ib_stage_cong_debugfs_init,
mlx5_ib_stage_cong_debugfs_cleanup),
- STAGE_CREATE(MLX5_IB_STAGE_UAR,
- mlx5_ib_stage_uar_init,
- mlx5_ib_stage_uar_cleanup),
STAGE_CREATE(MLX5_IB_STAGE_BFREG,
mlx5_ib_stage_bfrag_init,
mlx5_ib_stage_bfrag_cleanup),
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index ace2df3e1d9f..fde859d207ae 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -351,6 +351,7 @@ struct mlx5_ib_flow_db {
#define MLX5_IB_UPD_XLT_PD BIT(4)
#define MLX5_IB_UPD_XLT_ACCESS BIT(5)
#define MLX5_IB_UPD_XLT_INDIRECT BIT(6)
+#define MLX5_IB_UPD_XLT_DOWNGRADE BIT(7)
/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
*
@@ -1005,7 +1006,6 @@ enum mlx5_ib_stages {
MLX5_IB_STAGE_ODP,
MLX5_IB_STAGE_COUNTERS,
MLX5_IB_STAGE_CONG_DEBUGFS,
- MLX5_IB_STAGE_UAR,
MLX5_IB_STAGE_BFREG,
MLX5_IB_STAGE_PRE_IB_REG_UMR,
MLX5_IB_STAGE_WHITELIST_UID,
@@ -1473,8 +1473,8 @@ void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev);
-void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
- struct mlx5_ib_mr *mr, int flags);
+int mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
+ struct mlx5_ib_mr *mr, int flags);
int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
enum ib_uverbs_advise_mr_advice advice,
@@ -1495,8 +1495,11 @@ static inline int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev)
{
return 0;
}
-static inline void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
- struct mlx5_ib_mr *mr, int flags) {}
+static inline int mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
+ struct mlx5_ib_mr *mr, int flags)
+{
+ return -EOPNOTSUPP;
+}
static inline int
mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 5fbebafc8774..6dd813bac5b2 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -525,7 +525,7 @@ static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
ent->fill_to_high_water = false;
if (ent->pending)
queue_delayed_work(ent->dev->cache.wq, &ent->dwork,
- msecs_to_jiffies(1000));
+ secs_to_jiffies(1));
else
mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
}
@@ -576,7 +576,7 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
"add keys command failed, err %d\n",
err);
queue_delayed_work(cache->wq, &ent->dwork,
- msecs_to_jiffies(1000));
+ secs_to_jiffies(1));
}
}
} else if (ent->mkeys_queue.ci > 2 * ent->limit) {
@@ -2051,7 +2051,7 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
ent->in_use--;
if (ent->is_tmp && !ent->tmp_cleanup_scheduled) {
mod_delayed_work(ent->dev->cache.wq, &ent->dwork,
- msecs_to_jiffies(30 * 1000));
+ secs_to_jiffies(30));
ent->tmp_cleanup_scheduled = true;
}
spin_unlock_irq(&ent->mkeys_queue.lock);
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 86d8fa63bf69..eaa2f9f5f3a9 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -34,6 +34,9 @@
#include <linux/kernel.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
+#include <linux/hmm.h>
+#include <linux/hmm-dma.h>
+#include <linux/pci-p2pdma.h>
#include "mlx5_ib.h"
#include "cmd.h"
@@ -158,41 +161,50 @@ static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
}
}
-static u64 umem_dma_to_mtt(dma_addr_t umem_dma)
-{
- u64 mtt_entry = umem_dma & ODP_DMA_ADDR_MASK;
-
- if (umem_dma & ODP_READ_ALLOWED_BIT)
- mtt_entry |= MLX5_IB_MTT_READ;
- if (umem_dma & ODP_WRITE_ALLOWED_BIT)
- mtt_entry |= MLX5_IB_MTT_WRITE;
-
- return mtt_entry;
-}
-
-static void populate_mtt(__be64 *pas, size_t idx, size_t nentries,
- struct mlx5_ib_mr *mr, int flags)
+static int populate_mtt(__be64 *pas, size_t start, size_t nentries,
+ struct mlx5_ib_mr *mr, int flags)
{
struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
- dma_addr_t pa;
+ bool downgrade = flags & MLX5_IB_UPD_XLT_DOWNGRADE;
+ struct pci_p2pdma_map_state p2pdma_state = {};
+ struct ib_device *dev = odp->umem.ibdev;
size_t i;
if (flags & MLX5_IB_UPD_XLT_ZAP)
- return;
+ return 0;
for (i = 0; i < nentries; i++) {
- pa = odp->dma_list[idx + i];
- pas[i] = cpu_to_be64(umem_dma_to_mtt(pa));
+		unsigned long pfn = odp->map.pfn_list[start + i];
+		dma_addr_t dma_addr;
+
+		if (!(pfn & HMM_PFN_VALID))
+ /* ODP initialization */
+ continue;
+
+ dma_addr = hmm_dma_map_pfn(dev->dma_device, &odp->map,
+ start + i, &p2pdma_state);
+ if (ib_dma_mapping_error(dev, dma_addr))
+ return -EFAULT;
+
+ dma_addr |= MLX5_IB_MTT_READ;
+ if ((pfn & HMM_PFN_WRITE) && !downgrade)
+ dma_addr |= MLX5_IB_MTT_WRITE;
+
+ pas[i] = cpu_to_be64(dma_addr);
+ odp->npages++;
}
+ return 0;
}
-void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
- struct mlx5_ib_mr *mr, int flags)
+int mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
+ struct mlx5_ib_mr *mr, int flags)
{
if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
populate_klm(xlt, idx, nentries, mr, flags);
+ return 0;
} else {
- populate_mtt(xlt, idx, nentries, mr, flags);
+ return populate_mtt(xlt, idx, nentries, mr, flags);
}
}
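populate_mtt() now works from the HMM pfn list instead of a pre-built dma_list: entries that were never faulted in are skipped, the rest are DMA-mapped on demand, and write permission is granted only when the pfn is writable and no downgrade was requested. The per-entry pattern, condensed (helper names abbreviated, not the driver's):

	unsigned long pfn = pfn_list[i];

	if (!(pfn & HMM_PFN_VALID))
		continue;			/* never faulted in */

	dma = map_pfn(i);			/* hmm_dma_map_pfn() above */
	entry = dma | MTT_READ;			/* read always allowed */
	if ((pfn & HMM_PFN_WRITE) && !downgrade)
		entry |= MTT_WRITE;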
@@ -303,8 +315,7 @@ static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
* estimate the cost of another UMR vs. the cost of bigger
* UMR.
*/
- if (umem_odp->dma_list[idx] &
- (ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT)) {
+ if (umem_odp->map.pfn_list[idx] & HMM_PFN_VALID) {
if (!in_block) {
blk_start_idx = idx;
in_block = 1;
@@ -687,7 +698,7 @@ static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
{
int page_shift, ret, np;
bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
- u64 access_mask;
+ u64 access_mask = 0;
u64 start_idx;
bool fault = !(flags & MLX5_PF_FLAGS_SNAPSHOT);
u32 xlt_flags = MLX5_IB_UPD_XLT_ATOMIC;
@@ -695,12 +706,14 @@ static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
if (flags & MLX5_PF_FLAGS_ENABLE)
xlt_flags |= MLX5_IB_UPD_XLT_ENABLE;
+ if (flags & MLX5_PF_FLAGS_DOWNGRADE)
+ xlt_flags |= MLX5_IB_UPD_XLT_DOWNGRADE;
+
page_shift = odp->page_shift;
start_idx = (user_va - ib_umem_start(odp)) >> page_shift;
- access_mask = ODP_READ_ALLOWED_BIT;
if (odp->umem.writable && !downgrade)
- access_mask |= ODP_WRITE_ALLOWED_BIT;
+ access_mask |= HMM_PFN_WRITE;
np = ib_umem_odp_map_dma_and_lock(odp, user_va, bcnt, access_mask, fault);
if (np < 0)
diff --git a/drivers/infiniband/hw/mlx5/qpc.c b/drivers/infiniband/hw/mlx5/qpc.c
index d3dcc272200a..146d03ae40bd 100644
--- a/drivers/infiniband/hw/mlx5/qpc.c
+++ b/drivers/infiniband/hw/mlx5/qpc.c
@@ -21,8 +21,10 @@ mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
spin_lock_irqsave(&table->lock, flags);
common = radix_tree_lookup(&table->tree, rsn);
- if (common)
+ if (common && !common->invalid)
refcount_inc(&common->refcount);
+ else
+ common = NULL;
spin_unlock_irqrestore(&table->lock, flags);
@@ -178,6 +180,18 @@ static int create_resource_common(struct mlx5_ib_dev *dev,
return 0;
}
+static void modify_resource_common_state(struct mlx5_ib_dev *dev,
+ struct mlx5_core_qp *qp,
+ bool invalid)
+{
+ struct mlx5_qp_table *table = &dev->qp_table;
+ unsigned long flags;
+
+ spin_lock_irqsave(&table->lock, flags);
+ qp->common.invalid = invalid;
+ spin_unlock_irqrestore(&table->lock, flags);
+}
+
static void destroy_resource_common(struct mlx5_ib_dev *dev,
struct mlx5_core_qp *qp)
{
@@ -609,8 +623,20 @@ err_destroy_rq:
int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
struct mlx5_core_qp *rq)
{
+ int ret;
+
+	/* The rq destruction can be retried if it fails, so mark the
+	 * common resource invalid first, and only destroy the host-side
+	 * resources once FW destruction has completed successfully.
+	 */
+ modify_resource_common_state(dev, rq, true);
+ ret = destroy_rq_tracked(dev, rq->qpn, rq->uid);
+ if (ret) {
+ modify_resource_common_state(dev, rq, false);
+ return ret;
+ }
destroy_resource_common(dev, rq);
- return destroy_rq_tracked(dev, rq->qpn, rq->uid);
+ return 0;
}
static void destroy_sq_tracked(struct mlx5_ib_dev *dev, u32 sqn, u16 uid)
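Together with the mlx5_get_rsc() change above, this implements a retryable two-phase teardown: the resource is first marked invalid so concurrent lookups skip it, the firmware object is destroyed, and only on success is the host-side state freed. The pattern in isolation (sketch, not driver code):

	mark_invalid(res);		/* lookups now return NULL */
	err = fw_destroy(res);
	if (err) {
		mark_valid(res);	/* caller may retry teardown */
		return err;
	}
	free_host_state(res);		/* only after FW success */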
diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
index 793f3c5c4d01..5be4426a2884 100644
--- a/drivers/infiniband/hw/mlx5/umr.c
+++ b/drivers/infiniband/hw/mlx5/umr.c
@@ -840,7 +840,17 @@ int mlx5r_umr_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
size_to_map = npages * desc_size;
dma_sync_single_for_cpu(ddev, sg.addr, sg.length,
DMA_TO_DEVICE);
- mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
+		/*
+		 * npages is the maximum number of pages to map, but not all
+		 * pages are guaranteed to be mapped.
+		 *
+		 * For example, if a page is a P2P page of a type that is not
+		 * supported for mapping, fewer pages than requested will be
+		 * mapped.
+		 */
+ err = mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
+ if (err)
+ return err;
dma_sync_single_for_device(ddev, sg.addr, sg.length,
DMA_TO_DEVICE);
sg.length = ALIGN(size_to_map, MLX5_UMR_FLEX_ALIGNMENT);
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 192f83fd7c8a..dacb8ceeebe0 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -144,7 +144,7 @@ static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
buddy->max_order = max_order;
spin_lock_init(&buddy->lock);
- buddy->bits = kcalloc(buddy->max_order + 1, sizeof(long *),
+ buddy->bits = kcalloc(buddy->max_order + 1, sizeof(*buddy->bits),
GFP_KERNEL);
buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
GFP_KERNEL);
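sizeof(long *) and sizeof(*buddy->bits) happen to be the same size on every supported platform, so this is a robustness fix rather than a behavior change: sizing by the dereferenced destination keeps the allocation correct if the element type ever changes. The general idiom:

	/* Preferred: the size tracks the pointed-to type automatically. */
	ptr = kcalloc(n, sizeof(*ptr), GFP_KERNEL);
	/* Avoid: repeating a type that can drift out of sync with ptr,
	 * e.g. kcalloc(n, sizeof(some_type *), GFP_KERNEL). */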
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index f948b76f984d..3fbf99757b11 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -56,7 +56,7 @@ static int usnic_uiom_dma_fault(struct iommu_domain *domain,
unsigned long iova, int flags,
void *token)
{
- usnic_err("Device %s iommu fault domain 0x%pK va 0x%lx flags 0x%x\n",
+ usnic_err("Device %s iommu fault domain 0x%p va 0x%lx flags 0x%x\n",
dev_name(dev),
domain, iova, flags);
return -ENOSYS;
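Since v4.15, plain %p already prints a hashed pointer value by default, while %pK output varies with kptr_restrict; these diagnostics only need a stable token for correlating messages, so %p is sufficient. For example:

	pr_info("domain %p\n", domain);	/* hashed value, safe to log */
	/* %px prints the raw address and is reserved for the rare
	 * cases that genuinely need it. */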
diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig
index c180e7ebcfc5..1ed5b63f8afc 100644
--- a/drivers/infiniband/sw/rxe/Kconfig
+++ b/drivers/infiniband/sw/rxe/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config RDMA_RXE
tristate "Software RDMA over Ethernet (RoCE) driver"
- depends on INET && PCI && INFINIBAND
+ depends on INET && PCI && INFINIBAND && 64BIT
depends on INFINIBAND_VIRT_DMA
select NET_UDP_TUNNEL
select CRC32
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index b248c68bf9b1..3a77d6db1720 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -101,6 +101,8 @@ static void rxe_init_device_param(struct rxe_dev *rxe, struct net_device *ndev)
rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
+ rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_FLUSH;
+ rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC_WRITE;
}
}
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 0bc3fbb6554f..876702058c84 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -70,9 +70,9 @@ int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma,
void *addr, int length, enum rxe_mr_copy_dir dir);
int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset);
-int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
- u64 compare, u64 swap_add, u64 *orig_val);
-int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value);
+enum resp_states rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val);
+enum resp_states rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value);
struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
enum rxe_mr_lookup_type type);
int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);
@@ -193,13 +193,16 @@ static inline unsigned int wr_opcode_mask(int opcode, struct rxe_qp *qp)
/* rxe_odp.c */
extern const struct mmu_interval_notifier_ops rxe_mn_ops;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+#if defined CONFIG_INFINIBAND_ON_DEMAND_PAGING
int rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
u64 iova, int access_flags, struct rxe_mr *mr);
int rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
enum rxe_mr_copy_dir dir);
-int rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
- u64 compare, u64 swap_add, u64 *orig_val);
+enum resp_states rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val);
+int rxe_odp_flush_pmem_iova(struct rxe_mr *mr, u64 iova,
+ unsigned int length);
+enum resp_states rxe_odp_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline int
rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
@@ -212,9 +215,19 @@ static inline int rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
{
return -EOPNOTSUPP;
}
-static inline int
+static inline enum resp_states
rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
- u64 compare, u64 swap_add, u64 *orig_val)
+ u64 compare, u64 swap_add, u64 *orig_val)
+{
+ return RESPST_ERR_UNSUPPORTED_OPCODE;
+}
+static inline int rxe_odp_flush_pmem_iova(struct rxe_mr *mr, u64 iova,
+ unsigned int length)
+{
+ return -EOPNOTSUPP;
+}
+static inline enum resp_states rxe_odp_do_atomic_write(struct rxe_mr *mr,
+ u64 iova, u64 value)
{
return RESPST_ERR_UNSUPPORTED_OPCODE;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 432d864c3ce9..bcb97b3ea58a 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -424,7 +424,7 @@ err1:
return err;
}
-int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
+static int rxe_mr_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
{
unsigned int page_offset;
unsigned long index;
@@ -433,16 +433,6 @@ int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
int err;
u8 *va;
- /* mr must be valid even if length is zero */
- if (WARN_ON(!mr))
- return -EINVAL;
-
- if (length == 0)
- return 0;
-
- if (mr->ibmr.type == IB_MR_TYPE_DMA)
- return -EFAULT;
-
err = mr_check_range(mr, iova, length);
if (err)
return err;
@@ -454,7 +444,7 @@ int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
if (!page)
return -EFAULT;
bytes = min_t(unsigned int, length,
- mr_page_size(mr) - page_offset);
+ mr_page_size(mr) - page_offset);
va = kmap_local_page(page);
arch_wb_cache_pmem(va + page_offset, bytes);
@@ -468,11 +458,33 @@ int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
return 0;
}
+int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 start, unsigned int length)
+{
+ int err;
+
+ /* mr must be valid even if length is zero */
+ if (WARN_ON(!mr))
+ return -EINVAL;
+
+ if (length == 0)
+ return 0;
+
+ if (mr->ibmr.type == IB_MR_TYPE_DMA)
+ return -EFAULT;
+
+ if (is_odp_mr(mr))
+ err = rxe_odp_flush_pmem_iova(mr, start, length);
+ else
+ err = rxe_mr_flush_pmem_iova(mr, start, length);
+
+ return err;
+}
+
/* Guarantee atomicity of atomic operations at the machine level. */
DEFINE_SPINLOCK(atomic_ops_lock);
-int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
- u64 compare, u64 swap_add, u64 *orig_val)
+enum resp_states rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val)
{
unsigned int page_offset;
struct page *page;
@@ -524,27 +536,15 @@ int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
kunmap_local(va);
- return 0;
+ return RESPST_NONE;
}
-#if defined CONFIG_64BIT
-/* only implemented or called for 64 bit architectures */
-int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
+enum resp_states rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
{
unsigned int page_offset;
struct page *page;
u64 *va;
- /* ODP is not supported right now. WIP. */
- if (is_odp_mr(mr))
- return RESPST_ERR_UNSUPPORTED_OPCODE;
-
- /* See IBA oA19-28 */
- if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
- rxe_dbg_mr(mr, "mr not in valid state\n");
- return RESPST_ERR_RKEY_VIOLATION;
- }
-
if (mr->ibmr.type == IB_MR_TYPE_DMA) {
page_offset = iova & (PAGE_SIZE - 1);
page = ib_virt_dma_to_page(iova);
@@ -572,20 +572,12 @@ int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
}
va = kmap_local_page(page);
-
/* Do atomic write after all prior operations have completed */
smp_store_release(&va[page_offset >> 3], value);
-
kunmap_local(va);
- return 0;
-}
-#else
-int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
-{
- return RESPST_ERR_UNSUPPORTED_OPCODE;
+ return RESPST_NONE;
}
-#endif
int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
{
diff --git a/drivers/infiniband/sw/rxe/rxe_odp.c b/drivers/infiniband/sw/rxe/rxe_odp.c
index 9f6e2bb2a269..dbc5a5600eb7 100644
--- a/drivers/infiniband/sw/rxe/rxe_odp.c
+++ b/drivers/infiniband/sw/rxe/rxe_odp.c
@@ -4,6 +4,7 @@
*/
#include <linux/hmm.h>
+#include <linux/libnvdimm.h>
#include <rdma/ib_umem_odp.h>
@@ -26,7 +27,7 @@ static bool rxe_ib_invalidate_range(struct mmu_interval_notifier *mni,
start = max_t(u64, ib_umem_start(umem_odp), range->start);
end = min_t(u64, ib_umem_end(umem_odp), range->end);
- /* update umem_odp->dma_list */
+ /* update umem_odp->map.pfn_list */
ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
mutex_unlock(&umem_odp->umem_mutex);
@@ -44,12 +45,11 @@ static int rxe_odp_do_pagefault_and_lock(struct rxe_mr *mr, u64 user_va, int bcn
{
struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
bool fault = !(flags & RXE_PAGEFAULT_SNAPSHOT);
- u64 access_mask;
+ u64 access_mask = 0;
int np;
- access_mask = ODP_READ_ALLOWED_BIT;
if (umem_odp->umem.writable && !(flags & RXE_PAGEFAULT_RDONLY))
- access_mask |= ODP_WRITE_ALLOWED_BIT;
+ access_mask |= HMM_PFN_WRITE;
/*
* ib_umem_odp_map_dma_and_lock() locks umem_mutex on success.
@@ -124,8 +124,8 @@ int rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
return err;
}
-static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp,
- u64 iova, int length, u32 perm)
+static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp, u64 iova,
+ int length)
{
bool need_fault = false;
u64 addr;
@@ -137,7 +137,7 @@ static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp,
while (addr < iova + length) {
idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
- if (!(umem_odp->dma_list[idx] & perm)) {
+ if (!(umem_odp->map.pfn_list[idx] & HMM_PFN_VALID)) {
need_fault = true;
break;
}
@@ -147,23 +147,28 @@ static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp,
return need_fault;
}
+static unsigned long rxe_odp_iova_to_index(struct ib_umem_odp *umem_odp, u64 iova)
+{
+ return (iova - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
+}
+
+static unsigned long rxe_odp_iova_to_page_offset(struct ib_umem_odp *umem_odp, u64 iova)
+{
+ return iova & (BIT(umem_odp->page_shift) - 1);
+}
+
static int rxe_odp_map_range_and_lock(struct rxe_mr *mr, u64 iova, int length, u32 flags)
{
struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
bool need_fault;
- u64 perm;
int err;
if (unlikely(length < 1))
return -EINVAL;
- perm = ODP_READ_ALLOWED_BIT;
- if (!(flags & RXE_PAGEFAULT_RDONLY))
- perm |= ODP_WRITE_ALLOWED_BIT;
-
mutex_lock(&umem_odp->umem_mutex);
- need_fault = rxe_check_pagefault(umem_odp, iova, length, perm);
+ need_fault = rxe_check_pagefault(umem_odp, iova, length);
if (need_fault) {
mutex_unlock(&umem_odp->umem_mutex);
@@ -173,7 +178,7 @@ static int rxe_odp_map_range_and_lock(struct rxe_mr *mr, u64 iova, int length, u
if (err < 0)
return err;
- need_fault = rxe_check_pagefault(umem_odp, iova, length, perm);
+ need_fault = rxe_check_pagefault(umem_odp, iova, length);
if (need_fault)
return -EFAULT;
}
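The two helpers make the ODP address arithmetic explicit: the index selects a slot in the pfn list and the offset locates the byte inside that page. A worked example with assumed values:

	/* Assume page_shift = 12 (4K pages), ib_umem_start = 0x10000. */
	u64 iova = 0x13a10;

	idx    = (0x13a10 - 0x10000) >> 12;	/* = 3 */
	offset = 0x13a10 & (BIT(12) - 1);	/* = 0xa10 */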
@@ -190,13 +195,13 @@ static int __rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
size_t offset;
u8 *user_va;
- idx = (iova - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
- offset = iova & (BIT(umem_odp->page_shift) - 1);
+ idx = rxe_odp_iova_to_index(umem_odp, iova);
+ offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
while (length > 0) {
u8 *src, *dest;
- page = hmm_pfn_to_page(umem_odp->pfn_list[idx]);
+ page = hmm_pfn_to_page(umem_odp->map.pfn_list[idx]);
user_va = kmap_local_page(page);
if (!user_va)
return -EFAULT;
@@ -255,8 +260,9 @@ int rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
return err;
}
-static int rxe_odp_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
- u64 compare, u64 swap_add, u64 *orig_val)
+static enum resp_states rxe_odp_do_atomic_op(struct rxe_mr *mr, u64 iova,
+ int opcode, u64 compare,
+ u64 swap_add, u64 *orig_val)
{
struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
unsigned int page_offset;
@@ -277,9 +283,9 @@ static int rxe_odp_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
return RESPST_ERR_RKEY_VIOLATION;
}
- idx = (iova - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
- page_offset = iova & (BIT(umem_odp->page_shift) - 1);
- page = hmm_pfn_to_page(umem_odp->pfn_list[idx]);
+ idx = rxe_odp_iova_to_index(umem_odp, iova);
+ page_offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
+ page = hmm_pfn_to_page(umem_odp->map.pfn_list[idx]);
if (!page)
return RESPST_ERR_RKEY_VIOLATION;
@@ -304,11 +310,11 @@ static int rxe_odp_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
kunmap_local(va);
- return 0;
+ return RESPST_NONE;
}
-int rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
- u64 compare, u64 swap_add, u64 *orig_val)
+enum resp_states rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val)
{
struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
int err;
@@ -324,3 +330,91 @@ int rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
return err;
}
+
+int rxe_odp_flush_pmem_iova(struct rxe_mr *mr, u64 iova,
+ unsigned int length)
+{
+ struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
+ unsigned int page_offset;
+ unsigned long index;
+ struct page *page;
+ unsigned int bytes;
+ int err;
+ u8 *va;
+
+ err = rxe_odp_map_range_and_lock(mr, iova, length,
+ RXE_PAGEFAULT_DEFAULT);
+ if (err)
+ return err;
+
+ while (length > 0) {
+ index = rxe_odp_iova_to_index(umem_odp, iova);
+ page_offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
+
+ page = hmm_pfn_to_page(umem_odp->map.pfn_list[index]);
+ if (!page) {
+ mutex_unlock(&umem_odp->umem_mutex);
+ return -EFAULT;
+ }
+
+ bytes = min_t(unsigned int, length,
+ mr_page_size(mr) - page_offset);
+
+ va = kmap_local_page(page);
+ arch_wb_cache_pmem(va + page_offset, bytes);
+ kunmap_local(va);
+
+ length -= bytes;
+ iova += bytes;
+ page_offset = 0;
+ }
+
+ mutex_unlock(&umem_odp->umem_mutex);
+
+ return 0;
+}
+
+enum resp_states rxe_odp_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
+{
+ struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
+ unsigned int page_offset;
+ unsigned long index;
+ struct page *page;
+ int err;
+ u64 *va;
+
+ /* See IBA oA19-28 */
+ err = mr_check_range(mr, iova, sizeof(value));
+ if (unlikely(err)) {
+ rxe_dbg_mr(mr, "iova out of range\n");
+ return RESPST_ERR_RKEY_VIOLATION;
+ }
+
+ err = rxe_odp_map_range_and_lock(mr, iova, sizeof(value),
+ RXE_PAGEFAULT_DEFAULT);
+ if (err)
+ return RESPST_ERR_RKEY_VIOLATION;
+
+ page_offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
+ index = rxe_odp_iova_to_index(umem_odp, iova);
+ page = hmm_pfn_to_page(umem_odp->map.pfn_list[index]);
+ if (!page) {
+ mutex_unlock(&umem_odp->umem_mutex);
+ return RESPST_ERR_RKEY_VIOLATION;
+ }
+ /* See IBA A19.4.2 */
+ if (unlikely(page_offset & 0x7)) {
+ mutex_unlock(&umem_odp->umem_mutex);
+ rxe_dbg_mr(mr, "misaligned address\n");
+ return RESPST_ERR_MISALIGNED_ATOMIC;
+ }
+
+ va = kmap_local_page(page);
+ /* Do atomic write after all prior operations have completed */
+ smp_store_release(&va[page_offset >> 3], value);
+ kunmap_local(va);
+
+ mutex_unlock(&umem_odp->umem_mutex);
+
+ return RESPST_NONE;
+}
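The page-offset check enforces IBA A19.4.2: with the target 8-byte aligned, va[page_offset >> 3] is one naturally aligned u64 slot, so the smp_store_release() is a single atomic store rather than a potentially torn write. Reduced to its essence (sketch):

	if (iova & 0x7)
		return -EINVAL;		/* misaligned atomic target */

	/* Aligned: one atomic, ordered 64-bit store. */
	smp_store_release(&va[page_offset >> 3], value);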
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index 003f681e5dc0..767870568372 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -53,12 +53,9 @@ enum rxe_device_param {
| IB_DEVICE_MEM_WINDOW
| IB_DEVICE_FLUSH_GLOBAL
| IB_DEVICE_FLUSH_PERSISTENT
-#ifdef CONFIG_64BIT
| IB_DEVICE_MEM_WINDOW_TYPE_2B
| IB_DEVICE_ATOMIC_WRITE,
-#else
- | IB_DEVICE_MEM_WINDOW_TYPE_2B,
-#endif /* CONFIG_64BIT */
+
RXE_MAX_SGE = 32,
RXE_MAX_WQE_SIZE = sizeof(struct rxe_send_wqe) +
sizeof(struct ib_sge) * RXE_MAX_SGE,
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 7975fb0e2782..f2af3e0aef35 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -811,7 +811,12 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
spin_unlock_irqrestore(&qp->state_lock, flags);
qp->qp_timeout_jiffies = 0;
- if (qp_type(qp) == IB_QPT_RC) {
+	/* timer_setup() initializes .function, so a NULL .function means
+	 * timer_setup() was never called and the timer was never
+	 * initialized; otherwise the timer is initialized.
+	 */
+ if (qp_type(qp) == IB_QPT_RC && qp->retrans_timer.function &&
+ qp->rnr_nak_timer.function) {
timer_delete_sync(&qp->retrans_timer);
timer_delete_sync(&qp->rnr_nak_timer);
}
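The guard works because timer_setup() is the only place .function is assigned, so a NULL .function identifies a timer that was never initialized and must not be passed to timer_delete_sync(). The pattern (sketch; the callback name is rxe's assumed retransmit handler):

	timer_setup(&qp->retrans_timer, retransmit_timer, 0);
	/* ... later, in cleanup: */
	if (qp->retrans_timer.function)
		timer_delete_sync(&qp->retrans_timer);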
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 5d9174e408db..711f73e0bbb1 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -649,10 +649,6 @@ static enum resp_states process_flush(struct rxe_qp *qp,
struct rxe_mr *mr = qp->resp.mr;
struct resp_res *res = qp->resp.res;
- /* ODP is not supported right now. WIP. */
- if (is_odp_mr(mr))
- return RESPST_ERR_UNSUPPORTED_OPCODE;
-
/* oA19-14, oA19-15 */
if (res && res->replay)
return RESPST_ACKNOWLEDGE;
@@ -753,7 +749,16 @@ static enum resp_states atomic_write_reply(struct rxe_qp *qp,
value = *(u64 *)payload_addr(pkt);
iova = qp->resp.va + qp->resp.offset;
- err = rxe_mr_do_atomic_write(mr, iova, value);
+ /* See IBA oA19-28 */
+ if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
+ rxe_dbg_mr(mr, "mr not in valid state\n");
+ return RESPST_ERR_RKEY_VIOLATION;
+ }
+
+ if (is_odp_mr(mr))
+ err = rxe_odp_do_atomic_write(mr, iova, value);
+ else
+ err = rxe_mr_do_atomic_write(mr, iova, value);
if (err)
return err;
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
index 80332638d9e3..6f8f353e9583 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -85,17 +85,17 @@ static bool is_done(struct rxe_task *task)
/* do_task is a wrapper for the three tasks (requester,
* completer, responder) and calls them in a loop until
- * they return a non-zero value. It is called either
- * directly by rxe_run_task or indirectly if rxe_sched_task
- * schedules the task. They must call __reserve_if_idle to
- * move the task to busy before calling or scheduling.
- * The task can also be moved to drained or invalid
- * by calls to rxe_cleanup_task or rxe_disable_task.
- * In that case tasks which get here are not executed but
- * just flushed. The tasks are designed to look to see if
- * there is work to do and then do part of it before returning
- * here with a return value of zero until all the work
- * has been consumed then it returns a non-zero value.
+ * they return a non-zero value. It is called indirectly
+ * when rxe_sched_task schedules the task. They must
+ * call __reserve_if_idle to move the task to busy before
+ * calling or scheduling. The task can also be moved to
+ * drained or invalid by calls to rxe_cleanup_task or
+ * rxe_disable_task. In that case tasks which get here
+ * are not executed but just flushed. The tasks are
+ * designed to check whether there is work to do, do part
+ * of it, and return zero to this loop until all the work
+ * has been consumed, at which point they return a
+ * non-zero value.
* The number of times the task can be run is limited by
* max iterations so one task cannot hold the cpu forever.
* If the limit is hit and work remains the task is rescheduled.
@@ -234,24 +234,6 @@ void rxe_cleanup_task(struct rxe_task *task)
spin_unlock_irqrestore(&task->lock, flags);
}
-/* run the task inline if it is currently idle
- * cannot call do_task holding the lock
- */
-void rxe_run_task(struct rxe_task *task)
-{
- unsigned long flags;
- bool run;
-
- WARN_ON(rxe_read(task->qp) <= 0);
-
- spin_lock_irqsave(&task->lock, flags);
- run = __reserve_if_idle(task);
- spin_unlock_irqrestore(&task->lock, flags);
-
- if (run)
- do_task(task);
-}
-
/* schedule the task to run later as a work queue entry.
* the queue_work call can be called holding
* the lock.
diff --git a/drivers/infiniband/sw/rxe/rxe_task.h b/drivers/infiniband/sw/rxe/rxe_task.h
index a63e258b3d66..a8c9a77b6027 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.h
+++ b/drivers/infiniband/sw/rxe/rxe_task.h
@@ -47,8 +47,6 @@ int rxe_init_task(struct rxe_task *task, struct rxe_qp *qp,
/* cleanup task */
void rxe_cleanup_task(struct rxe_task *task);
-void rxe_run_task(struct rxe_task *task);
-
void rxe_sched_task(struct rxe_task *task);
/* keep a task from scheduling */
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index d9e5a2e4c471..f5fd71717b80 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -718,7 +718,7 @@ static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len)
"MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__)
#define siw_dbg_cep(cep, fmt, ...) \
- ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%pK] %s: " fmt, \
+ ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%p] %s: " fmt, \
cep, __func__, ##__VA_ARGS__)
void siw_cq_flush(struct siw_cq *cq);
diff --git a/drivers/infiniband/sw/siw/siw_cq.c b/drivers/infiniband/sw/siw/siw_cq.c
index f3c2226aff94..25b3c741b66b 100644
--- a/drivers/infiniband/sw/siw/siw_cq.c
+++ b/drivers/infiniband/sw/siw/siw_cq.c
@@ -72,7 +72,7 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)
wc->opcode = map_wc_opcode[cqe->opcode];
wc->status = map_cqe_status[cqe->status].ib;
siw_dbg_cq(cq,
- "idx %u, type %d, flags %2x, id 0x%pK\n",
+ "idx %u, type %d, flags %2x, id 0x%p\n",
cq->cq_get % cq->num_cqe, cqe->opcode,
cqe->flags, (void *)(uintptr_t)cqe->id);
} else {
diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
index dcb963607c8b..d5ddeb17bd22 100644
--- a/drivers/infiniband/sw/siw/siw_mem.c
+++ b/drivers/infiniband/sw/siw/siw_mem.c
@@ -18,30 +18,6 @@
#define SIW_STAG_MAX_INDEX 0x00ffffff
/*
- * The code avoids special Stag of zero and tries to randomize
- * STag values between 1 and SIW_STAG_MAX_INDEX.
- */
-int siw_mem_add(struct siw_device *sdev, struct siw_mem *m)
-{
- struct xa_limit limit = XA_LIMIT(1, SIW_STAG_MAX_INDEX);
- u32 id, next;
-
- get_random_bytes(&next, 4);
- next &= SIW_STAG_MAX_INDEX;
-
- if (xa_alloc_cyclic(&sdev->mem_xa, &id, m, limit, &next,
- GFP_KERNEL) < 0)
- return -ENOMEM;
-
- /* Set the STag index part */
- m->stag = id << 8;
-
- siw_dbg_mem(m, "new MEM object\n");
-
- return 0;
-}
-
-/*
* siw_mem_id2obj()
*
* resolves memory from stag given by id. might be called from:
@@ -181,10 +157,10 @@ int siw_check_mem(struct ib_pd *pd, struct siw_mem *mem, u64 addr,
*/
if (addr < mem->va || addr + len > mem->va + mem->len) {
siw_dbg_pd(pd, "MEM interval len %d\n", len);
- siw_dbg_pd(pd, "[0x%pK, 0x%pK] out of bounds\n",
+ siw_dbg_pd(pd, "[0x%p, 0x%p] out of bounds\n",
(void *)(uintptr_t)addr,
(void *)(uintptr_t)(addr + len));
- siw_dbg_pd(pd, "[0x%pK, 0x%pK] STag=0x%08x\n",
+ siw_dbg_pd(pd, "[0x%p, 0x%p] STag=0x%08x\n",
(void *)(uintptr_t)mem->va,
(void *)(uintptr_t)(mem->va + mem->len),
mem->stag);
diff --git a/drivers/infiniband/sw/siw/siw_mem.h b/drivers/infiniband/sw/siw/siw_mem.h
index e74cfcd6dbc1..8e769d30e2ac 100644
--- a/drivers/infiniband/sw/siw/siw_mem.h
+++ b/drivers/infiniband/sw/siw/siw_mem.h
@@ -12,7 +12,6 @@ void siw_umem_release(struct siw_umem *umem);
struct siw_pbl *siw_pbl_alloc(u32 num_buf);
dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx);
struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index);
-int siw_mem_add(struct siw_device *sdev, struct siw_mem *m);
int siw_invalidate_stag(struct ib_pd *pd, u32 stag);
int siw_check_mem(struct ib_pd *pd, struct siw_mem *mem, u64 addr,
enum ib_access_flags perms, int len);
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index 32554eba1eac..a10820e33887 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -38,7 +38,7 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
p = siw_get_upage(umem, dest_addr);
if (unlikely(!p)) {
- pr_warn("siw: %s: [QP %u]: bogus addr: %pK, %pK\n",
+ pr_warn("siw: %s: [QP %u]: bogus addr: %p, %p\n",
__func__, qp_id(rx_qp(srx)),
(void *)(uintptr_t)dest_addr,
(void *)(uintptr_t)umem->fp_addr);
@@ -51,7 +51,7 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
pg_off = dest_addr & ~PAGE_MASK;
bytes = min(len, (int)PAGE_SIZE - pg_off);
- siw_dbg_qp(rx_qp(srx), "page %pK, bytes=%u\n", p, bytes);
+ siw_dbg_qp(rx_qp(srx), "page %p, bytes=%u\n", p, bytes);
dest = kmap_atomic(p);
rv = skb_copy_bits(srx->skb, srx->skb_offset, dest + pg_off,
@@ -105,11 +105,11 @@ static int siw_rx_kva(struct siw_rx_stream *srx, void *kva, int len)
{
int rv;
- siw_dbg_qp(rx_qp(srx), "kva: 0x%pK, len: %u\n", kva, len);
+ siw_dbg_qp(rx_qp(srx), "kva: 0x%p, len: %u\n", kva, len);
rv = skb_copy_bits(srx->skb, srx->skb_offset, kva, len);
if (unlikely(rv)) {
- pr_warn("siw: [QP %u]: %s, len %d, kva 0x%pK, rv %d\n",
+ pr_warn("siw: [QP %u]: %s, len %d, kva 0x%p, rv %d\n",
qp_id(rx_qp(srx)), __func__, len, kva, rv);
return rv;
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index fd7b266a221b..2b2a7b8e93b0 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -936,7 +936,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
rv = -EINVAL;
break;
}
- siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%pK\n",
+ siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%p\n",
sqe->opcode, sqe->flags,
(void *)(uintptr_t)sqe->id);
@@ -1102,7 +1102,7 @@ int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
siw_dbg_qp(qp, "error %d\n", rv);
*bad_wr = wr;
}
- return rv > 0 ? 0 : rv;
+ return rv;
}
int siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
@@ -1332,7 +1332,7 @@ struct ib_mr *siw_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
struct siw_device *sdev = to_siw_dev(pd->device);
int rv;
- siw_dbg_pd(pd, "start: 0x%pK, va: 0x%pK, len: %llu\n",
+ siw_dbg_pd(pd, "start: 0x%p, va: 0x%p, len: %llu\n",
(void *)(uintptr_t)start, (void *)(uintptr_t)rnic_va,
(unsigned long long)len);
@@ -1525,7 +1525,7 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
mem->len = base_mr->length;
mem->va = base_mr->iova;
siw_dbg_mem(mem,
- "%llu bytes, start 0x%pK, %u SLE to %u entries\n",
+ "%llu bytes, start 0x%p, %u SLE to %u entries\n",
mem->len, (void *)(uintptr_t)mem->va, num_sle,
pbl->num_buf);
}
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 1008858f78e2..c066a4da7c14 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -105,6 +105,8 @@
#define PKT_XBE2_FW_5_EARLY 3
#define PKT_XBE2_FW_5_11 4
+#define FLAG_DELAY_INIT BIT(0)
+
static bool dpad_to_buttons;
module_param(dpad_to_buttons, bool, S_IRUGO);
MODULE_PARM_DESC(dpad_to_buttons, "Map D-PAD to buttons rather than axes for unknown pads");
@@ -127,6 +129,7 @@ static const struct xpad_device {
char *name;
u8 mapping;
u8 xtype;
+ u8 flags;
} xpad_device[] = {
/* Please keep this list sorted by vendor and product ID. */
{ 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 },
@@ -416,6 +419,7 @@ static const struct xpad_device {
{ 0x3285, 0x0663, "Nacon Evol-X", 0, XTYPE_XBOXONE },
{ 0x3537, 0x1004, "GameSir T4 Kaleid", 0, XTYPE_XBOX360 },
{ 0x3537, 0x1010, "GameSir G7 SE", 0, XTYPE_XBOXONE },
+ { 0x366c, 0x0005, "ByoWave Proteus Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE, FLAG_DELAY_INIT },
{ 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX },
{ 0x413d, 0x2104, "Black Shark Green Ghost Gamepad", 0, XTYPE_XBOX360 },
{ 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
@@ -571,6 +575,7 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOXONE_VENDOR(0x3285), /* Nacon Evol-X */
XPAD_XBOX360_VENDOR(0x3537), /* GameSir Controllers */
XPAD_XBOXONE_VENDOR(0x3537), /* GameSir Controllers */
+ XPAD_XBOXONE_VENDOR(0x366c), /* ByoWave controllers */
XPAD_XBOX360_VENDOR(0x413d), /* Black Shark Green Ghost Controller */
{ }
};
@@ -599,6 +604,7 @@ struct xboxone_init_packet {
* - https://github.com/medusalix/xone/blob/master/bus/protocol.c
*/
#define GIP_CMD_ACK 0x01
+#define GIP_CMD_ANNOUNCE 0x02
#define GIP_CMD_IDENTIFY 0x04
#define GIP_CMD_POWER 0x05
#define GIP_CMD_AUTHENTICATE 0x06
@@ -673,20 +679,19 @@ static const u8 xboxone_hori_ack_id[] = {
};
/*
- * This packet is required for most (all?) of the PDP pads to start
- * sending input reports. These pads include: (0x0e6f:0x02ab),
- * (0x0e6f:0x02a4), (0x0e6f:0x02a6).
+ * This packet is sent by default on Windows, and is required for some pads
+ * to start sending input reports, including most (all?) of the PDP pads:
+ * (0x0e6f:0x02ab), (0x0e6f:0x02a4), (0x0e6f:0x02a6).
*/
-static const u8 xboxone_pdp_led_on[] = {
- GIP_CMD_LED, GIP_OPT_INTERNAL, GIP_SEQ0, GIP_PL_LEN(3), 0x00, GIP_LED_ON, 0x14
-};
+static const u8 xboxone_led_on[] = {
+ GIP_CMD_LED, GIP_OPT_INTERNAL, GIP_SEQ0, GIP_PL_LEN(3), 0x00, GIP_LED_ON, 0x14
+};
/*
* This packet is required for most (all?) of the PDP pads to start
* sending input reports. These pads include: (0x0e6f:0x02ab),
* (0x0e6f:0x02a4), (0x0e6f:0x02a6).
*/
-static const u8 xboxone_pdp_auth[] = {
+static const u8 xboxone_auth_done[] = {
GIP_CMD_AUTHENTICATE, GIP_OPT_INTERNAL, GIP_SEQ0, GIP_PL_LEN(2), 0x01, 0x00
};
@@ -723,12 +728,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
XBOXONE_INIT_PKT(0x045e, 0x02ea, xboxone_s_init),
XBOXONE_INIT_PKT(0x045e, 0x0b00, xboxone_s_init),
XBOXONE_INIT_PKT(0x045e, 0x0b00, extra_input_packet_init),
- XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_led_on),
- XBOXONE_INIT_PKT(0x0f0d, 0x01b2, xboxone_pdp_led_on),
- XBOXONE_INIT_PKT(0x20d6, 0xa01a, xboxone_pdp_led_on),
- XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_auth),
- XBOXONE_INIT_PKT(0x0f0d, 0x01b2, xboxone_pdp_auth),
- XBOXONE_INIT_PKT(0x20d6, 0xa01a, xboxone_pdp_auth),
+ XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_led_on),
+ XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_auth_done),
XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
@@ -788,10 +789,13 @@ struct usb_xpad {
const char *name; /* name of the device */
struct work_struct work; /* init/remove device from callback */
time64_t mode_btn_down_ts;
+ bool delay_init; /* init packets should be delayed */
+ bool delayed_init_done;
};
static int xpad_init_input(struct usb_xpad *xpad);
static void xpad_deinit_input(struct usb_xpad *xpad);
+static int xpad_start_input(struct usb_xpad *xpad);
static void xpadone_ack_mode_report(struct usb_xpad *xpad, u8 seq_num);
static void xpad360w_poweroff_controller(struct usb_xpad *xpad);
@@ -1076,6 +1080,17 @@ static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char
do_sync = true;
}
+ } else if (data[0] == GIP_CMD_ANNOUNCE) {
+ int error;
+
+ if (xpad->delay_init && !xpad->delayed_init_done) {
+ xpad->delayed_init_done = true;
+ error = xpad_start_input(xpad);
+ if (error)
+ dev_warn(&xpad->dev->dev,
+ "unable to start delayed input: %d\n",
+ error);
+ }
} else if (data[0] == GIP_CMD_INPUT) { /* The main valid packet type for inputs */
/* menu/view buttons */
input_report_key(dev, BTN_START, data[4] & BIT(2));
@@ -1254,6 +1269,14 @@ static bool xpad_prepare_next_init_packet(struct usb_xpad *xpad)
if (xpad->xtype != XTYPE_XBOXONE)
return false;
+ /*
+ * Some dongles will discard init packets if they're sent before the
+ * controller connects. In these cases, we need to wait until we get
+ * an announce packet from them to send the init packet sequence.
+ */
+ if (xpad->delay_init && !xpad->delayed_init_done)
+ return false;
+
/* Perform initialization sequence for Xbox One pads that require it */
while (xpad->init_seq < ARRAY_SIZE(xboxone_init_packets)) {
init_packet = &xboxone_init_packets[xpad->init_seq++];
@@ -2069,6 +2092,9 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
xpad->mapping = xpad_device[i].mapping;
xpad->xtype = xpad_device[i].xtype;
xpad->name = xpad_device[i].name;
+ if (xpad_device[i].flags & FLAG_DELAY_INIT)
+ xpad->delay_init = true;
+
xpad->packet_type = PKT_XB;
INIT_WORK(&xpad->work, xpad_presence_work);
@@ -2268,6 +2294,7 @@ static int xpad_resume(struct usb_interface *intf)
struct usb_xpad *xpad = usb_get_intfdata(intf);
struct input_dev *input = xpad->dev;
+ xpad->delayed_init_done = false;
if (xpad->xtype == XTYPE_XBOX360W)
return xpad360w_start_input(xpad);
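To summarize the delayed-init flow added above: probe sets delay_init from the device flags, xpad_prepare_next_init_packet() holds back the init sequence until the dongle sends GIP_CMD_ANNOUNCE, and resume clears delayed_init_done so a reconnecting controller announces itself again. A condensed sketch of the gating logic (abridged from the hunks above; the real code also checks the return value):

    /* In the URB completion path, once the controller announces itself: */
    if (data[0] == GIP_CMD_ANNOUNCE &&
        xpad->delay_init && !xpad->delayed_init_done) {
        xpad->delayed_init_done = true;
        xpad_start_input(xpad);    /* init packets may flow from here on */
    }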
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index adf0f311996c..3ff2fcf05ad5 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -37,7 +37,7 @@ static int atkbd_set = 2;
module_param_named(set, atkbd_set, int, 0);
MODULE_PARM_DESC(set, "Select keyboard code set (2 = default, 3 = PS/2 native)");
-#if defined(__i386__) || defined(__x86_64__) || defined(__hppa__)
+#if defined(__i386__) || defined(__x86_64__) || defined(__hppa__) || defined(__loongarch__)
static bool atkbd_reset;
#else
static bool atkbd_reset = true;
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 5c39a217b94c..f9db86da0818 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -449,6 +449,8 @@ static enum hrtimer_restart gpio_keys_irq_timer(struct hrtimer *t)
release_timer);
struct input_dev *input = bdata->input;
+ guard(spinlock_irqsave)(&bdata->lock);
+
if (bdata->key_pressed) {
input_report_key(input, *bdata->code, 0);
input_sync(input);
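guard(spinlock_irqsave)(&bdata->lock) is the linux/cleanup.h scoped-lock helper: it acquires the lock with interrupts saved and drops it automatically when the enclosing scope exits, so no return path in the timer callback can leak the lock. Conceptually it expands to the open-coded form below (illustrative):

    unsigned long flags;

    spin_lock_irqsave(&bdata->lock, flags);
    /* ... body of gpio_keys_irq_timer ... */
    spin_unlock_irqrestore(&bdata->lock, flags);   /* implicit at scope exit */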
@@ -486,7 +488,7 @@ static irqreturn_t gpio_keys_irq_isr(int irq, void *dev_id)
if (bdata->release_delay)
hrtimer_start(&bdata->release_timer,
ms_to_ktime(bdata->release_delay),
- HRTIMER_MODE_REL_HARD);
+ HRTIMER_MODE_REL);
out:
return IRQ_HANDLED;
}
@@ -628,7 +630,7 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
bdata->release_delay = button->debounce_interval;
hrtimer_setup(&bdata->release_timer, gpio_keys_irq_timer,
- CLOCK_REALTIME, HRTIMER_MODE_REL_HARD);
+ CLOCK_REALTIME, HRTIMER_MODE_REL);
isr = gpio_keys_irq_isr;
irqflags = 0;
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index e46473cb817c..e50a6fea9a60 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -104,6 +104,16 @@ static void disable_row_irqs(struct matrix_keypad *keypad)
disable_irq_nosync(keypad->row_irqs[i]);
}
+static uint32_t read_row_state(struct matrix_keypad *keypad)
+{
+ int row;
+ u32 row_state = 0;
+
+ for (row = 0; row < keypad->num_row_gpios; row++)
+ row_state |= row_asserted(keypad, row) ? BIT(row) : 0;
+ return row_state;
+}
+
/*
* This gets the keys from keyboard and reports it to input subsystem
*/
@@ -115,6 +125,10 @@ static void matrix_keypad_scan(struct work_struct *work)
const unsigned short *keycodes = input_dev->keycode;
uint32_t new_state[MATRIX_MAX_COLS];
int row, col, code;
+ u32 init_row_state, new_row_state;
+
+ /* read initial row state to detect changes between scans */
+ init_row_state = read_row_state(keypad);
/* de-activate all columns for scanning */
activate_all_cols(keypad, false);
@@ -129,9 +143,7 @@ static void matrix_keypad_scan(struct work_struct *work)
activate_col(keypad, col, true);
- for (row = 0; row < keypad->num_row_gpios; row++)
- new_state[col] |=
- row_asserted(keypad, row) ? BIT(row) : 0;
+ new_state[col] = read_row_state(keypad);
activate_col(keypad, col, false);
}
@@ -165,6 +177,18 @@ static void matrix_keypad_scan(struct work_struct *work)
keypad->scan_pending = false;
enable_row_irqs(keypad);
}
+
+ /* read new row state and detect if value has changed */
+ new_row_state = read_row_state(keypad);
+ if (init_row_state != new_row_state) {
+ guard(spinlock_irq)(&keypad->lock);
+ if (unlikely(keypad->scan_pending || keypad->stopped))
+ return;
+ disable_row_irqs(keypad);
+ keypad->scan_pending = true;
+ schedule_delayed_work(&keypad->work,
+ msecs_to_jiffies(keypad->debounce_ms));
+ }
}
static irqreturn_t matrix_keypad_interrupt(int irq, void *id)
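The before/after row sampling closes a race: if a key changes state while the matrix is being scanned, the corresponding edge has already fired (or never will), so no further interrupt arrives and the event would be lost. A worked example of the case being handled (bit values are illustrative):

    init = read_row_state(keypad);    /* key still down:   0b0001 */
    /* ... columns walked, new_state[] sampled ... */
    newv = read_row_state(keypad);    /* released mid-scan: 0b0000 */
    if (init != newv)                 /* stale scan: queue another pass */
        schedule_delayed_work(&keypad->work,
                              msecs_to_jiffies(keypad->debounce_ms));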
diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c
index bbf409dda89f..fe7398eeb828 100644
--- a/drivers/input/keyboard/snvs_pwrkey.c
+++ b/drivers/input/keyboard/snvs_pwrkey.c
@@ -27,6 +27,8 @@
#define SNVS_HPSR_BTN BIT(6)
#define SNVS_LPSR_SPO BIT(18)
#define SNVS_LPCR_DEP_EN BIT(5)
+#define SNVS_LPCR_BPT_SHIFT 16
+#define SNVS_LPCR_BPT_MASK (3 << SNVS_LPCR_BPT_SHIFT)
#define DEBOUNCE_TIME 30
#define REPEAT_INTERVAL 60
@@ -114,6 +116,8 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
struct device_node *np;
struct clk *clk;
int error;
+ unsigned int val;
+ unsigned int bpt;
u32 vid;
/* Get SNVS register Page */
@@ -148,6 +152,27 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
if (pdata->irq < 0)
return -EINVAL;
+ error = of_property_read_u32(np, "power-off-time-sec", &val);
+ if (!error) {
+ switch (val) {
+ case 0:
+ bpt = 0x3;
+ break;
+ case 5:
+ case 10:
+ case 15:
+ bpt = (val / 5) - 1;
+ break;
+ default:
+ dev_err(&pdev->dev,
+ "power-off-time-sec %d out of range\n", val);
+ return -EINVAL;
+ }
+
+ regmap_update_bits(pdata->snvs, SNVS_LPCR_REG, SNVS_LPCR_BPT_MASK,
+ bpt << SNVS_LPCR_BPT_SHIFT);
+ }
+
regmap_read(pdata->snvs, SNVS_HPVIDR1_REG, &vid);
pdata->minor_rev = vid & 0xff;
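The mapping from the devicetree power-off-time-sec property onto the 2-bit BPT field of LPCR works out as follows (a worked example of the switch above; 0b11 is simply the value the driver writes for val == 0):

    /*   power-off-time-sec = 0         -> bpt = 0b11
     *   power-off-time-sec = 5/10/15   -> bpt = val/5 - 1 = 0b00/0b01/0b10
     * e.g. val = 10: bpt = 10/5 - 1 = 1, so LPCR[17:16] = 0b01.
     */
    regmap_update_bits(pdata->snvs, SNVS_LPCR_REG, SNVS_LPCR_BPT_MASK,
                       bpt << SNVS_LPCR_BPT_SHIFT);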
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index d9ee14b1f451..4581f1c53644 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -844,6 +844,12 @@ static int ims_pcu_flash_firmware(struct ims_pcu *pcu,
addr = be32_to_cpu(rec->addr) / 2;
len = be16_to_cpu(rec->len);
+ if (len > sizeof(pcu->cmd_buf) - 1 - sizeof(*fragment)) {
+ dev_err(pcu->dev,
+ "Invalid record length in firmware: %d\n", len);
+ return -EINVAL;
+ }
+
fragment = (void *)&pcu->cmd_buf[1];
put_unaligned_le32(addr, &fragment->addr);
fragment->len = len;
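The new check caps each firmware record against the space actually available in cmd_buf: one byte for the command id plus the fragment header. A worked bound under assumed sizes (the numbers are illustrative, not the driver's real constants):

    /* If sizeof(cmd_buf) were 64 and sizeof(*fragment) were 5, any
     * len > 64 - 1 - 5 = 58 would overflow and must be rejected. */
    if (len > sizeof(pcu->cmd_buf) - 1 - sizeof(*fragment))
        return -EINVAL;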
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index cd750f512dee..0a33d995d15d 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -192,6 +192,7 @@ config MSM_IOMMU
If unsure, say N here.
source "drivers/iommu/amd/Kconfig"
+source "drivers/iommu/arm/Kconfig"
source "drivers/iommu/intel/Kconfig"
source "drivers/iommu/iommufd/Kconfig"
source "drivers/iommu/riscv/Kconfig"
@@ -199,7 +200,6 @@ source "drivers/iommu/riscv/Kconfig"
config IRQ_REMAP
bool "Support for Interrupt Remapping"
depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI
- select DMAR_TABLE if INTEL_IOMMU
help
Supports Interrupt remapping for IO-APIC and MSI devices.
To use x2apic mode in the CPU's which support x2APIC enhancements or
@@ -314,150 +314,6 @@ config APPLE_DART
Say Y here if you are using an Apple SoC.
-# ARM IOMMU support
-config ARM_SMMU
- tristate "ARM Ltd. System MMU (SMMU) Support"
- depends on ARM64 || ARM || COMPILE_TEST
- depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
- select IOMMU_API
- select IOMMU_IO_PGTABLE_LPAE
- select ARM_DMA_USE_IOMMU if ARM
- help
- Support for implementations of the ARM System MMU architecture
- versions 1 and 2.
-
- Say Y here if your SoC includes an IOMMU device implementing
- the ARM SMMU architecture.
-
-config ARM_SMMU_LEGACY_DT_BINDINGS
- bool "Support the legacy \"mmu-masters\" devicetree bindings"
- depends on ARM_SMMU=y && OF
- help
- Support for the badly designed and deprecated "mmu-masters"
- devicetree bindings. This allows some DMA masters to attach
- to the SMMU but does not provide any support via the DMA API.
- If you're lucky, you might be able to get VFIO up and running.
-
- If you say Y here then you'll make me very sad. Instead, say N
- and move your firmware to the utopian future that was 2016.
-
-config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT
- bool "Default to disabling bypass on ARM SMMU v1 and v2"
- depends on ARM_SMMU
- default y
- help
- Say Y here to (by default) disable bypass streams such that
- incoming transactions from devices that are not attached to
- an iommu domain will report an abort back to the device and
- will not be allowed to pass through the SMMU.
-
- Any old kernels that existed before this KConfig was
- introduced would default to _allowing_ bypass (AKA the
- equivalent of NO for this config). However the default for
- this option is YES because the old behavior is insecure.
-
- There are few reasons to allow unmatched stream bypass, and
- even fewer good ones. If saying YES here breaks your board
- you should work on fixing your board. This KConfig option
- is expected to be removed in the future and we'll simply
- hardcode the bypass disable in the code.
-
- NOTE: the kernel command line parameter
- 'arm-smmu.disable_bypass' will continue to override this
- config.
-
-config ARM_SMMU_MMU_500_CPRE_ERRATA
- bool "Enable errata workaround for CPRE in SMMU reset path"
- depends on ARM_SMMU
- default y
- help
- Say Y here (by default) to apply workaround to disable
- MMU-500's next-page prefetcher for sake of 4 known errata.
-
- Say N here only when it is sure that any errata related to
- prefetch enablement are not applicable on the platform.
- Refer silicon-errata.rst for info on errata IDs.
-
-config ARM_SMMU_QCOM
- def_tristate y
- depends on ARM_SMMU && ARCH_QCOM
- select QCOM_SCM
- help
- When running on a Qualcomm platform that has the custom variant
- of the ARM SMMU, this needs to be built into the SMMU driver.
-
-config ARM_SMMU_QCOM_DEBUG
- bool "ARM SMMU QCOM implementation defined debug support"
- depends on ARM_SMMU_QCOM=y
- help
- Support for implementation specific debug features in ARM SMMU
- hardware found in QTI platforms. This include support for
- the Translation Buffer Units (TBU) that can be used to obtain
- additional information when debugging memory management issues
- like context faults.
-
- Say Y here to enable debug for issues such as context faults
- or TLB sync timeouts which requires implementation defined
- register dumps.
-
-config ARM_SMMU_V3
- tristate "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
- depends on ARM64
- select IOMMU_API
- select IOMMU_IO_PGTABLE_LPAE
- select GENERIC_MSI_IRQ
- select IOMMUFD_DRIVER if IOMMUFD
- help
- Support for implementations of the ARM System MMU architecture
- version 3 providing translation support to a PCIe root complex.
-
- Say Y here if your system includes an IOMMU device implementing
- the ARM SMMUv3 architecture.
-
-if ARM_SMMU_V3
-config ARM_SMMU_V3_SVA
- bool "Shared Virtual Addressing support for the ARM SMMUv3"
- select IOMMU_SVA
- select IOMMU_IOPF
- select MMU_NOTIFIER
- help
- Support for sharing process address spaces with devices using the
- SMMUv3.
-
- Say Y here if your system supports SVA extensions such as PCIe PASID
- and PRI.
-
-config ARM_SMMU_V3_IOMMUFD
- bool "Enable IOMMUFD features for ARM SMMUv3 (EXPERIMENTAL)"
- depends on IOMMUFD
- help
- Support for IOMMUFD features intended to support virtual machines
- with accelerated virtual IOMMUs.
-
- Say Y here if you are doing development and testing on this feature.
-
-config ARM_SMMU_V3_KUNIT_TEST
- tristate "KUnit tests for arm-smmu-v3 driver" if !KUNIT_ALL_TESTS
- depends on KUNIT
- depends on ARM_SMMU_V3_SVA
- default KUNIT_ALL_TESTS
- help
- Enable this option to unit-test arm-smmu-v3 driver functions.
-
- If unsure, say N.
-
-config TEGRA241_CMDQV
- bool "NVIDIA Tegra241 CMDQ-V extension support for ARM SMMUv3"
- depends on ACPI
- help
- Support for NVIDIA CMDQ-Virtualization extension for ARM SMMUv3. The
- CMDQ-V extension is similar to v3.3 ECMDQ for multi command queues
- support, except with virtualization capabilities.
-
- Say Y here if your system is NVIDIA Tegra241 (Grace) or it has the same
- CMDQ-V extension.
-endif
-
config S390_IOMMU
def_bool y if S390 && PCI
depends on S390 && PCI
@@ -494,18 +350,6 @@ config MTK_IOMMU_V1
if unsure, say N here.
-config QCOM_IOMMU
- # Note: iommu drivers cannot (yet?) be built as modules
- bool "Qualcomm IOMMU Support"
- depends on ARCH_QCOM || COMPILE_TEST
- depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
- select QCOM_SCM
- select IOMMU_API
- select IOMMU_IO_PGTABLE_LPAE
- select ARM_DMA_USE_IOMMU
- help
- Support for IOMMU on certain Qualcomm SoCs.
-
config HYPERV_IOMMU
bool "Hyper-V IRQ Handling"
depends on HYPERV && X86
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 5e5a83c6c2aa..355294fa9033 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -1,6 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
-obj-y += amd/ intel/ arm/ iommufd/ riscv/
+obj-y += arm/ iommufd/
+obj-$(CONFIG_AMD_IOMMU) += amd/
+obj-$(CONFIG_INTEL_IOMMU) += intel/
+obj-$(CONFIG_RISCV_IOMMU) += riscv/
obj-$(CONFIG_IOMMU_API) += iommu.o
+obj-$(CONFIG_IOMMU_SUPPORT) += iommu-pages.o
obj-$(CONFIG_IOMMU_API) += iommu-traces.o
obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o
obj-$(CONFIG_IOMMU_DEBUGFS) += iommu-debugfs.o
diff --git a/drivers/iommu/amd/Makefile b/drivers/iommu/amd/Makefile
index 9de33b2d42f5..59c04a67f398 100644
--- a/drivers/iommu/amd/Makefile
+++ b/drivers/iommu/amd/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o io_pgtable.o io_pgtable_v2.o ppr.o pasid.o
+obj-y += iommu.o init.o quirks.o io_pgtable.o io_pgtable_v2.o ppr.o pasid.o
obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += debugfs.o
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 220c598b7e14..29a8864381c3 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -147,6 +147,8 @@ static inline int get_pci_sbdf_id(struct pci_dev *pdev)
return PCI_SEG_DEVID_TO_SBDF(seg, devid);
}
+bool amd_iommu_ht_range_ignore(void);
+
/*
* This must be called after device probe completes. During probe
* use rlookup_amd_iommu() get the iommu.
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 5089b58e528a..ccbab3a4811a 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -29,8 +29,6 @@
* some size calculation constants
*/
#define DEV_TABLE_ENTRY_SIZE 32
-#define ALIAS_TABLE_ENTRY_SIZE 2
-#define RLOOKUP_TABLE_ENTRY_SIZE (sizeof(void *))
/* Capability offsets used by the driver */
#define MMIO_CAP_HDR_OFFSET 0x00
@@ -111,6 +109,7 @@
#define FEATURE_SNPAVICSUP GENMASK_ULL(7, 5)
#define FEATURE_SNPAVICSUP_GAM(x) \
(FIELD_GET(FEATURE_SNPAVICSUP, x) == 0x1)
+#define FEATURE_HT_RANGE_IGNORE BIT_ULL(11)
#define FEATURE_NUM_INT_REMAP_SUP GENMASK_ULL(9, 8)
#define FEATURE_NUM_INT_REMAP_SUP_2K(x) \
@@ -316,6 +315,7 @@
#define DTE_IRQ_REMAP_INTCTL (2ULL << 60)
#define DTE_IRQ_REMAP_ENABLE 1ULL
+#define DTE_INTTAB_ALIGNMENT 128
#define DTE_INTTABLEN_MASK (0xfULL << 1)
#define DTE_INTTABLEN_VALUE_512 9ULL
#define DTE_INTTABLEN_512 (DTE_INTTABLEN_VALUE_512 << 1)
@@ -616,12 +616,6 @@ struct amd_iommu_pci_seg {
/* Size of the device table */
u32 dev_table_size;
- /* Size of the alias table */
- u32 alias_table_size;
-
- /* Size of the rlookup table */
- u32 rlookup_table_size;
-
/*
* device table virtual address
*
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 14aa0d77df26..c06b62f87b9b 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -243,17 +243,14 @@ static void init_translation_status(struct amd_iommu *iommu)
iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}
-static inline unsigned long tbl_size(int entry_size, int last_bdf)
+int amd_iommu_get_num_iommus(void)
{
- unsigned shift = PAGE_SHIFT +
- get_order((last_bdf + 1) * entry_size);
-
- return 1UL << shift;
+ return amd_iommus_present;
}
-int amd_iommu_get_num_iommus(void)
+bool amd_iommu_ht_range_ignore(void)
{
- return amd_iommus_present;
+ return check_feature2(FEATURE_HT_RANGE_IGNORE);
}
/*
@@ -634,8 +631,8 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table, u16 pci_
/* Allocate per PCI segment device table */
static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
{
- pci_seg->dev_table = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32,
- get_order(pci_seg->dev_table_size));
+ pci_seg->dev_table = iommu_alloc_pages_sz(GFP_KERNEL | GFP_DMA32,
+ pci_seg->dev_table_size);
if (!pci_seg->dev_table)
return -ENOMEM;
@@ -644,16 +641,16 @@ static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg)
{
- iommu_free_pages(pci_seg->dev_table,
- get_order(pci_seg->dev_table_size));
+ iommu_free_pages(pci_seg->dev_table);
pci_seg->dev_table = NULL;
}
/* Allocate per PCI segment IOMMU rlookup table. */
static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
{
- pci_seg->rlookup_table = iommu_alloc_pages(GFP_KERNEL,
- get_order(pci_seg->rlookup_table_size));
+ pci_seg->rlookup_table = kvcalloc(pci_seg->last_bdf + 1,
+ sizeof(*pci_seg->rlookup_table),
+ GFP_KERNEL);
if (pci_seg->rlookup_table == NULL)
return -ENOMEM;
@@ -662,17 +659,15 @@ static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
static inline void free_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
{
- iommu_free_pages(pci_seg->rlookup_table,
- get_order(pci_seg->rlookup_table_size));
+ kvfree(pci_seg->rlookup_table);
pci_seg->rlookup_table = NULL;
}
static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
{
- pci_seg->irq_lookup_table = iommu_alloc_pages(GFP_KERNEL,
- get_order(pci_seg->rlookup_table_size));
- kmemleak_alloc(pci_seg->irq_lookup_table,
- pci_seg->rlookup_table_size, 1, GFP_KERNEL);
+ pci_seg->irq_lookup_table = kvcalloc(pci_seg->last_bdf + 1,
+ sizeof(*pci_seg->irq_lookup_table),
+ GFP_KERNEL);
if (pci_seg->irq_lookup_table == NULL)
return -ENOMEM;
@@ -681,9 +676,7 @@ static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_se
static inline void free_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
{
- kmemleak_free(pci_seg->irq_lookup_table);
- iommu_free_pages(pci_seg->irq_lookup_table,
- get_order(pci_seg->rlookup_table_size));
+ kvfree(pci_seg->irq_lookup_table);
pci_seg->irq_lookup_table = NULL;
}
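The pattern running through this and the surrounding hunks: CPU-only lookup tables (rlookup, irq lookup, alias) move from order-based page allocations, which round up to a power-of-two number of pages, to exact-size kvcalloc()/kvmalloc_array() allocations that may be vmalloc-backed; only tables the IOMMU hardware reads directly keep the physically contiguous iommu_alloc_pages_sz() path. A side-by-side sketch (the 'before' call matches the removed lines):

    /* Before: rounds (last_bdf + 1) * entry_size up to 2^order pages. */
    tbl = iommu_alloc_pages(GFP_KERNEL, get_order(n * entry_size));
    /* After: exact zeroed size, possibly vmalloc-backed - acceptable
     * because only the CPU ever dereferences these tables. */
    tbl = kvcalloc(n, entry_size, GFP_KERNEL);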
@@ -691,8 +684,9 @@ static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
{
int i;
- pci_seg->alias_table = iommu_alloc_pages(GFP_KERNEL,
- get_order(pci_seg->alias_table_size));
+ pci_seg->alias_table = kvmalloc_array(pci_seg->last_bdf + 1,
+ sizeof(*pci_seg->alias_table),
+ GFP_KERNEL);
if (!pci_seg->alias_table)
return -ENOMEM;
@@ -707,8 +701,7 @@ static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
{
- iommu_free_pages(pci_seg->alias_table,
- get_order(pci_seg->alias_table_size));
+ kvfree(pci_seg->alias_table);
pci_seg->alias_table = NULL;
}
@@ -719,8 +712,7 @@ static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
*/
static int __init alloc_command_buffer(struct amd_iommu *iommu)
{
- iommu->cmd_buf = iommu_alloc_pages(GFP_KERNEL,
- get_order(CMD_BUFFER_SIZE));
+ iommu->cmd_buf = iommu_alloc_pages_sz(GFP_KERNEL, CMD_BUFFER_SIZE);
return iommu->cmd_buf ? 0 : -ENOMEM;
}
@@ -817,20 +809,22 @@ static void iommu_disable_command_buffer(struct amd_iommu *iommu)
static void __init free_command_buffer(struct amd_iommu *iommu)
{
- iommu_free_pages(iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
+ iommu_free_pages(iommu->cmd_buf);
}
void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, gfp_t gfp,
size_t size)
{
- int order = get_order(size);
- void *buf = iommu_alloc_pages(gfp, order);
+ void *buf;
- if (buf &&
- check_feature(FEATURE_SNP) &&
- set_memory_4k((unsigned long)buf, (1 << order))) {
- iommu_free_pages(buf, order);
- buf = NULL;
+ size = PAGE_ALIGN(size);
+ buf = iommu_alloc_pages_sz(gfp, size);
+ if (!buf)
+ return NULL;
+ if (check_feature(FEATURE_SNP) &&
+ set_memory_4k((unsigned long)buf, size / PAGE_SIZE)) {
+ iommu_free_pages(buf);
+ return NULL;
}
return buf;
@@ -873,14 +867,14 @@ static void iommu_disable_event_buffer(struct amd_iommu *iommu)
static void __init free_event_buffer(struct amd_iommu *iommu)
{
- iommu_free_pages(iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
+ iommu_free_pages(iommu->evt_buf);
}
static void free_ga_log(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
- iommu_free_pages(iommu->ga_log, get_order(GA_LOG_SIZE));
- iommu_free_pages(iommu->ga_log_tail, get_order(8));
+ iommu_free_pages(iommu->ga_log);
+ iommu_free_pages(iommu->ga_log_tail);
#endif
}
@@ -925,11 +919,11 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
return 0;
- iommu->ga_log = iommu_alloc_pages(GFP_KERNEL, get_order(GA_LOG_SIZE));
+ iommu->ga_log = iommu_alloc_pages_sz(GFP_KERNEL, GA_LOG_SIZE);
if (!iommu->ga_log)
goto err_out;
- iommu->ga_log_tail = iommu_alloc_pages(GFP_KERNEL, get_order(8));
+ iommu->ga_log_tail = iommu_alloc_pages_sz(GFP_KERNEL, 8);
if (!iommu->ga_log_tail)
goto err_out;
@@ -950,7 +944,7 @@ static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
static void __init free_cwwb_sem(struct amd_iommu *iommu)
{
if (iommu->cmd_sem)
- iommu_free_page((void *)iommu->cmd_sem);
+ iommu_free_pages((void *)iommu->cmd_sem);
}
static void iommu_enable_xt(struct amd_iommu *iommu)
@@ -1024,8 +1018,8 @@ static bool __copy_device_table(struct amd_iommu *iommu)
if (!old_devtb)
return false;
- pci_seg->old_dev_tbl_cpy = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32,
- get_order(pci_seg->dev_table_size));
+ pci_seg->old_dev_tbl_cpy = iommu_alloc_pages_sz(
+ GFP_KERNEL | GFP_DMA32, pci_seg->dev_table_size);
if (pci_seg->old_dev_tbl_cpy == NULL) {
pr_err("Failed to allocate memory for copying old device table!\n");
memunmap(old_devtb);
@@ -1599,9 +1593,9 @@ static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id,
pci_seg->last_bdf = last_bdf;
DUMP_printk("PCI segment : 0x%0x, last bdf : 0x%04x\n", id, last_bdf);
- pci_seg->dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE, last_bdf);
- pci_seg->alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE, last_bdf);
- pci_seg->rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE, last_bdf);
+ pci_seg->dev_table_size =
+ max(roundup_pow_of_two((last_bdf + 1) * DEV_TABLE_ENTRY_SIZE),
+ SZ_4K);
pci_seg->id = id;
init_llist_head(&pci_seg->dev_data_list);
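A few worked examples of the new dev_table_size computation (DEV_TABLE_ENTRY_SIZE is 32 bytes, per the header above):

    /*  last_bdf = 0xFFFF: 65536 * 32 = 2 MiB  -> already a power of two
     *  last_bdf = 0x02FF:   768 * 32 = 24 KiB -> rounded up to 32 KiB
     *  last_bdf = 0x000F:    16 * 32 = 512 B  -> clamped to SZ_4K
     */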
@@ -2789,8 +2783,7 @@ static void early_enable_iommus(void)
for_each_pci_segment(pci_seg) {
if (pci_seg->old_dev_tbl_cpy != NULL) {
- iommu_free_pages(pci_seg->old_dev_tbl_cpy,
- get_order(pci_seg->dev_table_size));
+ iommu_free_pages(pci_seg->old_dev_tbl_cpy);
pci_seg->old_dev_tbl_cpy = NULL;
}
}
@@ -2803,8 +2796,7 @@ static void early_enable_iommus(void)
pr_info("Copied DEV table from previous kernel.\n");
for_each_pci_segment(pci_seg) {
- iommu_free_pages(pci_seg->dev_table,
- get_order(pci_seg->dev_table_size));
+ iommu_free_pages(pci_seg->dev_table);
pci_seg->dev_table = pci_seg->old_dev_tbl_cpy;
}
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 26cf562dde11..4d308c071134 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -47,14 +47,7 @@ static u64 *first_pte_l7(u64 *pte, unsigned long *page_size,
return fpte;
}
-static void free_pt_page(u64 *pt, struct list_head *freelist)
-{
- struct page *p = virt_to_page(pt);
-
- list_add_tail(&p->lru, freelist);
-}
-
-static void free_pt_lvl(u64 *pt, struct list_head *freelist, int lvl)
+static void free_pt_lvl(u64 *pt, struct iommu_pages_list *freelist, int lvl)
{
u64 *p;
int i;
@@ -77,20 +70,20 @@ static void free_pt_lvl(u64 *pt, struct list_head *freelist, int lvl)
if (lvl > 2)
free_pt_lvl(p, freelist, lvl - 1);
else
- free_pt_page(p, freelist);
+ iommu_pages_list_add(freelist, p);
}
- free_pt_page(pt, freelist);
+ iommu_pages_list_add(freelist, pt);
}
-static void free_sub_pt(u64 *root, int mode, struct list_head *freelist)
+static void free_sub_pt(u64 *root, int mode, struct iommu_pages_list *freelist)
{
switch (mode) {
case PAGE_MODE_NONE:
case PAGE_MODE_7_LEVEL:
break;
case PAGE_MODE_1_LEVEL:
- free_pt_page(root, freelist);
+ iommu_pages_list_add(freelist, root);
break;
case PAGE_MODE_2_LEVEL:
case PAGE_MODE_3_LEVEL:
@@ -121,7 +114,7 @@ static bool increase_address_space(struct amd_io_pgtable *pgtable,
bool ret = true;
u64 *pte;
- pte = iommu_alloc_page_node(cfg->amd.nid, gfp);
+ pte = iommu_alloc_pages_node_sz(cfg->amd.nid, gfp, SZ_4K);
if (!pte)
return false;
@@ -146,7 +139,7 @@ static bool increase_address_space(struct amd_io_pgtable *pgtable,
out:
spin_unlock_irqrestore(&domain->lock, flags);
- iommu_free_page(pte);
+ iommu_free_pages(pte);
return ret;
}
@@ -213,7 +206,8 @@ static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
if (!IOMMU_PTE_PRESENT(__pte) ||
pte_level == PAGE_MODE_NONE) {
- page = iommu_alloc_page_node(cfg->amd.nid, gfp);
+ page = iommu_alloc_pages_node_sz(cfg->amd.nid, gfp,
+ SZ_4K);
if (!page)
return NULL;
@@ -222,7 +216,7 @@ static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
/* pte could have been changed somewhere. */
if (!try_cmpxchg64(pte, &__pte, __npte))
- iommu_free_page(page);
+ iommu_free_pages(page);
else if (IOMMU_PTE_PRESENT(__pte))
*updated = true;
@@ -299,7 +293,8 @@ static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
return pte;
}
-static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
+static void free_clear_pte(u64 *pte, u64 pteval,
+ struct iommu_pages_list *freelist)
{
u64 *pt;
int mode;
@@ -328,7 +323,7 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
int prot, gfp_t gfp, size_t *mapped)
{
struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
- LIST_HEAD(freelist);
+ struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist);
bool updated = false;
u64 __pte, *pte;
int ret, i, count;
@@ -353,7 +348,7 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
for (i = 0; i < count; ++i)
free_clear_pte(&pte[i], pte[i], &freelist);
- if (!list_empty(&freelist))
+ if (!iommu_pages_list_empty(&freelist))
updated = true;
if (count > 1) {
@@ -524,7 +519,7 @@ static int iommu_v1_read_and_clear_dirty(struct io_pgtable_ops *ops,
static void v1_free_pgtable(struct io_pgtable *iop)
{
struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, pgtbl);
- LIST_HEAD(freelist);
+ struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist);
if (pgtable->mode == PAGE_MODE_NONE)
return;
@@ -541,7 +536,8 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
{
struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
- pgtable->root = iommu_alloc_page_node(cfg->amd.nid, GFP_KERNEL);
+ pgtable->root =
+ iommu_alloc_pages_node_sz(cfg->amd.nid, GFP_KERNEL, SZ_4K);
if (!pgtable->root)
return NULL;
pgtable->mode = PAGE_MODE_3_LEVEL;
diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c
index a56a27396305..b47941353ccb 100644
--- a/drivers/iommu/amd/io_pgtable_v2.c
+++ b/drivers/iommu/amd/io_pgtable_v2.c
@@ -121,10 +121,10 @@ static void free_pgtable(u64 *pt, int level)
if (level > 2)
free_pgtable(p, level - 1);
else
- iommu_free_page(p);
+ iommu_free_pages(p);
}
- iommu_free_page(pt);
+ iommu_free_pages(pt);
}
/* Allocate page table */
@@ -152,14 +152,14 @@ static u64 *v2_alloc_pte(int nid, u64 *pgd, unsigned long iova,
}
if (!IOMMU_PTE_PRESENT(__pte)) {
- page = iommu_alloc_page_node(nid, gfp);
+ page = iommu_alloc_pages_node_sz(nid, gfp, SZ_4K);
if (!page)
return NULL;
__npte = set_pgtable_attr(page);
/* pte could have been changed somewhere. */
if (!try_cmpxchg64(pte, &__pte, __npte))
- iommu_free_page(page);
+ iommu_free_pages(page);
else if (IOMMU_PTE_PRESENT(__pte))
*updated = true;
@@ -181,7 +181,7 @@ static u64 *v2_alloc_pte(int nid, u64 *pgd, unsigned long iova,
if (pg_size == IOMMU_PAGE_SIZE_1G)
free_pgtable(__pte, end_level - 1);
else if (pg_size == IOMMU_PAGE_SIZE_2M)
- iommu_free_page(__pte);
+ iommu_free_pages(__pte);
}
return pte;
@@ -346,7 +346,7 @@ static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
int ias = IOMMU_IN_ADDR_BIT_SIZE;
- pgtable->pgd = iommu_alloc_page_node(cfg->amd.nid, GFP_KERNEL);
+ pgtable->pgd = iommu_alloc_pages_node_sz(cfg->amd.nid, GFP_KERNEL, SZ_4K);
if (!pgtable->pgd)
return NULL;
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index f34209b08b4c..3117d99cf83d 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -241,7 +241,9 @@ static inline int get_acpihid_device_id(struct device *dev,
struct acpihid_map_entry **entry)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
- struct acpihid_map_entry *p;
+ struct acpihid_map_entry *p, *p1 = NULL;
+ int hid_count = 0;
+ bool fw_bug;
if (!adev)
return -ENODEV;
@@ -249,12 +251,33 @@ static inline int get_acpihid_device_id(struct device *dev,
list_for_each_entry(p, &acpihid_map, list) {
if (acpi_dev_hid_uid_match(adev, p->hid,
p->uid[0] ? p->uid : NULL)) {
- if (entry)
- *entry = p;
- return p->devid;
+ p1 = p;
+ fw_bug = false;
+ hid_count = 1;
+ break;
+ }
+
+ /*
+ * Count HID matches w/o UID, raise FW_BUG but allow exactly one match
+ */
+ if (acpi_dev_hid_match(adev, p->hid)) {
+ p1 = p;
+ hid_count++;
+ fw_bug = true;
}
}
- return -EINVAL;
+
+ if (!p1)
+ return -EINVAL;
+ if (fw_bug)
+ dev_err_once(dev, FW_BUG "No ACPI device matched UID, but %d device%s matched HID.\n",
+ hid_count, hid_count > 1 ? "s" : "");
+ if (hid_count > 1)
+ return -EINVAL;
+ if (entry)
+ *entry = p1;
+
+ return p1->devid;
}
static inline int get_device_sbdf_id(struct device *dev)
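The reworked lookup tolerates firmware that provides a matching HID but no usable UID: an exact HID+UID match returns immediately, while HID-only matches are counted, flagged with FW_BUG, and accepted only if the match is unique. Possible outcomes (the table entries are hypothetical):

    /*  exactly one HID+UID match      -> devid, no warning
     *  exactly one HID-only match     -> devid, FW_BUG logged once
     *  two or more HID-only matches   -> FW_BUG logged, -EINVAL
     *  no match at all                -> -EINVAL
     */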
@@ -982,6 +1005,14 @@ int amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
{
iommu_ga_log_notifier = notifier;
+ /*
+ * Ensure all in-flight IRQ handlers run to completion before returning
+ * to the caller, e.g. to ensure module code isn't unloaded while it's
+ * being executed in the IRQ handler.
+ */
+ if (!notifier)
+ synchronize_rcu();
+
return 0;
}
EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier);
@@ -1812,7 +1843,7 @@ static void free_gcr3_tbl_level1(u64 *tbl)
ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
- iommu_free_page(ptr);
+ iommu_free_pages(ptr);
}
}
@@ -1845,7 +1876,7 @@ static void free_gcr3_table(struct gcr3_tbl_info *gcr3_info)
/* Free per device domain ID */
pdom_id_free(gcr3_info->domid);
- iommu_free_page(gcr3_info->gcr3_tbl);
+ iommu_free_pages(gcr3_info->gcr3_tbl);
gcr3_info->gcr3_tbl = NULL;
}
@@ -1884,7 +1915,7 @@ static int setup_gcr3_table(struct gcr3_tbl_info *gcr3_info,
return -ENOSPC;
gcr3_info->domid = domid;
- gcr3_info->gcr3_tbl = iommu_alloc_page_node(nid, GFP_ATOMIC);
+ gcr3_info->gcr3_tbl = iommu_alloc_pages_node_sz(nid, GFP_ATOMIC, SZ_4K);
if (gcr3_info->gcr3_tbl == NULL) {
pdom_id_free(domid);
return -ENOMEM;
@@ -2908,6 +2939,9 @@ static void amd_iommu_get_resv_regions(struct device *dev,
return;
list_add_tail(&region->list, head);
+ if (amd_iommu_ht_range_ignore())
+ return;
+
region = iommu_alloc_resv_region(HT_RANGE_START,
HT_RANGE_END - HT_RANGE_START + 1,
0, IOMMU_RESV_RESERVED, GFP_KERNEL);
@@ -2984,38 +3018,6 @@ static const struct iommu_dirty_ops amd_dirty_ops = {
.read_and_clear_dirty = amd_iommu_read_and_clear_dirty,
};
-static int amd_iommu_dev_enable_feature(struct device *dev,
- enum iommu_dev_features feat)
-{
- int ret = 0;
-
- switch (feat) {
- case IOMMU_DEV_FEAT_IOPF:
- case IOMMU_DEV_FEAT_SVA:
- break;
- default:
- ret = -EINVAL;
- break;
- }
- return ret;
-}
-
-static int amd_iommu_dev_disable_feature(struct device *dev,
- enum iommu_dev_features feat)
-{
- int ret = 0;
-
- switch (feat) {
- case IOMMU_DEV_FEAT_IOPF:
- case IOMMU_DEV_FEAT_SVA:
- break;
- default:
- ret = -EINVAL;
- break;
- }
- return ret;
-}
-
const struct iommu_ops amd_iommu_ops = {
.capable = amd_iommu_capable,
.blocked_domain = &blocked_domain,
@@ -3029,8 +3031,6 @@ const struct iommu_ops amd_iommu_ops = {
.get_resv_regions = amd_iommu_get_resv_regions,
.is_attach_deferred = amd_iommu_is_attach_deferred,
.def_domain_type = amd_iommu_def_domain_type,
- .dev_enable_feat = amd_iommu_dev_enable_feature,
- .dev_disable_feat = amd_iommu_dev_disable_feature,
.page_response = amd_iommu_page_response,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = amd_iommu_attach_device,
@@ -3129,7 +3129,7 @@ static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid)
return table;
}
-static struct irq_remap_table *__alloc_irq_table(int nid, int order)
+static struct irq_remap_table *__alloc_irq_table(int nid, size_t size)
{
struct irq_remap_table *table;
@@ -3137,7 +3137,8 @@ static struct irq_remap_table *__alloc_irq_table(int nid, int order)
if (!table)
return NULL;
- table->table = iommu_alloc_pages_node(nid, GFP_KERNEL, order);
+ table->table = iommu_alloc_pages_node_sz(
+ nid, GFP_KERNEL, max(DTE_INTTAB_ALIGNMENT, size));
if (!table->table) {
kfree(table);
return NULL;
@@ -3191,7 +3192,6 @@ static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu,
struct irq_remap_table *new_table = NULL;
struct amd_iommu_pci_seg *pci_seg;
unsigned long flags;
- int order = get_order(get_irq_table_size(max_irqs));
int nid = iommu && iommu->dev ? dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE;
u16 alias;
@@ -3211,7 +3211,7 @@ static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu,
spin_unlock_irqrestore(&iommu_table_lock, flags);
/* Nothing there yet, allocate new irq remapping table */
- new_table = __alloc_irq_table(nid, order);
+ new_table = __alloc_irq_table(nid, get_irq_table_size(max_irqs));
if (!new_table)
return NULL;
@@ -3246,7 +3246,7 @@ out_unlock:
spin_unlock_irqrestore(&iommu_table_lock, flags);
if (new_table) {
- iommu_free_pages(new_table->table, order);
+ iommu_free_pages(new_table->table);
kfree(new_table);
}
return table;
diff --git a/drivers/iommu/amd/ppr.c b/drivers/iommu/amd/ppr.c
index 7c67d69f0b8c..e6767c057d01 100644
--- a/drivers/iommu/amd/ppr.c
+++ b/drivers/iommu/amd/ppr.c
@@ -48,7 +48,7 @@ void amd_iommu_enable_ppr_log(struct amd_iommu *iommu)
void __init amd_iommu_free_ppr_log(struct amd_iommu *iommu)
{
- iommu_free_pages(iommu->ppr_log, get_order(PPR_LOG_SIZE));
+ iommu_free_pages(iommu->ppr_log);
}
/*
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index e13501541fdd..757d24f67ad4 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -776,8 +776,7 @@ static void apple_dart_domain_free(struct iommu_domain *domain)
{
struct apple_dart_domain *dart_domain = to_dart_domain(domain);
- if (dart_domain->pgtbl_ops)
- free_io_pgtable_ops(dart_domain->pgtbl_ops);
+ free_io_pgtable_ops(dart_domain->pgtbl_ops);
kfree(dart_domain);
}
diff --git a/drivers/iommu/arm/Kconfig b/drivers/iommu/arm/Kconfig
new file mode 100644
index 000000000000..ef42bbe07dbe
--- /dev/null
+++ b/drivers/iommu/arm/Kconfig
@@ -0,0 +1,144 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# ARM IOMMU support
+config ARM_SMMU
+ tristate "ARM Ltd. System MMU (SMMU) Support"
+ depends on ARM64 || ARM || COMPILE_TEST
+ depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
+ select IOMMU_API
+ select IOMMU_IO_PGTABLE_LPAE
+ select ARM_DMA_USE_IOMMU if ARM
+ help
+ Support for implementations of the ARM System MMU architecture
+ versions 1 and 2.
+
+ Say Y here if your SoC includes an IOMMU device implementing
+ the ARM SMMU architecture.
+
+if ARM_SMMU
+config ARM_SMMU_LEGACY_DT_BINDINGS
+ bool "Support the legacy \"mmu-masters\" devicetree bindings"
+ depends on ARM_SMMU=y && OF
+ help
+ Support for the badly designed and deprecated "mmu-masters"
+ devicetree bindings. This allows some DMA masters to attach
+ to the SMMU but does not provide any support via the DMA API.
+ If you're lucky, you might be able to get VFIO up and running.
+
+ If you say Y here then you'll make me very sad. Instead, say N
+ and move your firmware to the utopian future that was 2016.
+
+config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT
+ bool "Disable unmatched stream bypass by default" if EXPERT
+ default y
+ help
+ If your firmware is broken and fails to describe StreamIDs which
+ Linux should know about in order to manage the SMMU correctly and
+ securely, and you don't want to boot with the 'arm-smmu.disable_bypass=0'
+ command line parameter, then as a last resort you can turn it off
+ by default here. But don't. This option may be removed at any time.
+
+ Note that 'arm-smmu.disable_bypass=1' will still take precedence.
+
+config ARM_SMMU_MMU_500_CPRE_ERRATA
+ bool "Enable errata workaround for CPRE in SMMU reset path"
+ default y
+ help
+ Say Y here (the default) to apply a workaround that disables
+ MMU-500's next-page prefetcher for the sake of 4 known errata.
+
+ Say N here only when you are sure that no errata related to
+ prefetch enablement apply to the platform.
+ Refer to silicon-errata.rst for info on errata IDs.
+
+config ARM_SMMU_QCOM
+ def_tristate y
+ depends on ARCH_QCOM
+ select QCOM_SCM
+ help
+ When running on a Qualcomm platform that has the custom variant
+ of the ARM SMMU, this needs to be built into the SMMU driver.
+
+config ARM_SMMU_QCOM_DEBUG
+ bool "ARM SMMU QCOM implementation defined debug support"
+ depends on ARM_SMMU_QCOM=y
+ help
+ Support for implementation specific debug features in ARM SMMU
+ hardware found in QTI platforms. This includes support for
+ the Translation Buffer Units (TBU) that can be used to obtain
+ additional information when debugging memory management issues
+ like context faults.
+
+ Say Y here to enable debug for issues such as context faults
+ or TLB sync timeouts, which require implementation-defined
+ register dumps.
+endif
+
+config ARM_SMMU_V3
+ tristate "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
+ depends on ARM64
+ select IOMMU_API
+ select IOMMU_IO_PGTABLE_LPAE
+ select GENERIC_MSI_IRQ
+ select IOMMUFD_DRIVER if IOMMUFD
+ help
+ Support for implementations of the ARM System MMU architecture
+ version 3 providing translation support to a PCIe root complex.
+
+ Say Y here if your system includes an IOMMU device implementing
+ the ARM SMMUv3 architecture.
+
+if ARM_SMMU_V3
+config ARM_SMMU_V3_SVA
+ bool "Shared Virtual Addressing support for the ARM SMMUv3"
+ select IOMMU_SVA
+ select IOMMU_IOPF
+ select MMU_NOTIFIER
+ help
+ Support for sharing process address spaces with devices using the
+ SMMUv3.
+
+ Say Y here if your system supports SVA extensions such as PCIe PASID
+ and PRI.
+
+config ARM_SMMU_V3_IOMMUFD
+ bool "Enable IOMMUFD features for ARM SMMUv3 (EXPERIMENTAL)"
+ depends on IOMMUFD
+ help
+ Support for IOMMUFD features intended to support virtual machines
+ with accelerated virtual IOMMUs.
+
+ Say Y here if you are doing development and testing on this feature.
+
+config ARM_SMMU_V3_KUNIT_TEST
+ tristate "KUnit tests for arm-smmu-v3 driver" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ depends on ARM_SMMU_V3_SVA
+ default KUNIT_ALL_TESTS
+ help
+ Enable this option to unit-test arm-smmu-v3 driver functions.
+
+ If unsure, say N.
+
+config TEGRA241_CMDQV
+ bool "NVIDIA Tegra241 CMDQ-V extension support for ARM SMMUv3"
+ depends on ACPI
+ help
+ Support for the NVIDIA CMDQ-Virtualization extension for ARM SMMUv3. The
+ CMDQ-V extension is similar to v3.3 ECMDQ's multiple command queue
+ support, except with virtualization capabilities.
+
+ Say Y here if your system is NVIDIA Tegra241 (Grace) or has the same
+ CMDQ-V extension.
+endif
+
+config QCOM_IOMMU
+ # Note: iommu drivers cannot (yet?) be built as modules
+ bool "Qualcomm IOMMU Support"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
+ select QCOM_SCM
+ select IOMMU_API
+ select IOMMU_IO_PGTABLE_LPAE
+ select ARM_DMA_USE_IOMMU
+ help
+ Support for IOMMU on certain Qualcomm SoCs.
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index 980cc6b33c43..0601dece0a0d 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -13,8 +13,6 @@
#include "arm-smmu-v3.h"
#include "../../io-pgtable-arm.h"
-static DEFINE_MUTEX(sva_lock);
-
static void __maybe_unused
arm_smmu_update_s1_domain_cd_entry(struct arm_smmu_domain *smmu_domain)
{
@@ -257,84 +255,6 @@ bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
return true;
}
-bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master)
-{
- /* We're not keeping track of SIDs in fault events */
- if (master->num_streams != 1)
- return false;
-
- return master->stall_enabled;
-}
-
-bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
-{
- if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
- return false;
-
- /* SSID support is mandatory for the moment */
- return master->ssid_bits;
-}
-
-bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
-{
- bool enabled;
-
- mutex_lock(&sva_lock);
- enabled = master->sva_enabled;
- mutex_unlock(&sva_lock);
- return enabled;
-}
-
-static int arm_smmu_master_sva_enable_iopf(struct arm_smmu_master *master)
-{
- struct device *dev = master->dev;
-
- /*
- * Drivers for devices supporting PRI or stall should enable IOPF first.
- * Others have device-specific fault handlers and don't need IOPF.
- */
- if (!arm_smmu_master_iopf_supported(master))
- return 0;
-
- if (!master->iopf_enabled)
- return -EINVAL;
-
- return iopf_queue_add_device(master->smmu->evtq.iopf, dev);
-}
-
-static void arm_smmu_master_sva_disable_iopf(struct arm_smmu_master *master)
-{
- struct device *dev = master->dev;
-
- if (!master->iopf_enabled)
- return;
-
- iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
-}
-
-int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
-{
- int ret;
-
- mutex_lock(&sva_lock);
- ret = arm_smmu_master_sva_enable_iopf(master);
- if (!ret)
- master->sva_enabled = true;
- mutex_unlock(&sva_lock);
-
- return ret;
-}
-
-int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
-{
- mutex_lock(&sva_lock);
- arm_smmu_master_sva_disable_iopf(master);
- master->sva_enabled = false;
- mutex_unlock(&sva_lock);
-
- return 0;
-}
-
void arm_smmu_sva_notifier_synchronize(void)
{
/*
@@ -353,6 +273,9 @@ static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
struct arm_smmu_cd target;
int ret;
+ if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
+ return -EOPNOTSUPP;
+
/* Prevent arm_smmu_mm_release from being called while we are attaching */
if (!mmget_not_zero(domain->mm))
return -EINVAL;
@@ -406,6 +329,9 @@ struct iommu_domain *arm_smmu_sva_domain_alloc(struct device *dev,
u32 asid;
int ret;
+ if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
+ return ERR_PTR(-EOPNOTSUPP);
+
smmu_domain = arm_smmu_domain_alloc();
if (IS_ERR(smmu_domain))
return ERR_CAST(smmu_domain);
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 48d910399a1b..10cc6dc26b7b 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -2720,6 +2720,7 @@ static void arm_smmu_disable_pasid(struct arm_smmu_master *master)
static struct arm_smmu_master_domain *
arm_smmu_find_master_domain(struct arm_smmu_domain *smmu_domain,
+ struct iommu_domain *domain,
struct arm_smmu_master *master,
ioasid_t ssid, bool nested_ats_flush)
{
@@ -2730,6 +2731,7 @@ arm_smmu_find_master_domain(struct arm_smmu_domain *smmu_domain,
list_for_each_entry(master_domain, &smmu_domain->devices,
devices_elm) {
if (master_domain->master == master &&
+ master_domain->domain == domain &&
master_domain->ssid == ssid &&
master_domain->nested_ats_flush == nested_ats_flush)
return master_domain;
@@ -2756,6 +2758,58 @@ to_smmu_domain_devices(struct iommu_domain *domain)
return NULL;
}
+static int arm_smmu_enable_iopf(struct arm_smmu_master *master,
+ struct arm_smmu_master_domain *master_domain)
+{
+ int ret;
+
+ iommu_group_mutex_assert(master->dev);
+
+ if (!IS_ENABLED(CONFIG_ARM_SMMU_V3_SVA))
+ return -EOPNOTSUPP;
+
+ /*
+ * Drivers for devices supporting PRI or stall require IOPF; others have
+ * device-specific fault handlers and don't need IOPF, so this is not a
+ * failure.
+ */
+ if (!master->stall_enabled)
+ return 0;
+
+ /* We're not keeping track of SIDs in fault events */
+ if (master->num_streams != 1)
+ return -EOPNOTSUPP;
+
+ if (master->iopf_refcount) {
+ master->iopf_refcount++;
+ master_domain->using_iopf = true;
+ return 0;
+ }
+
+ ret = iopf_queue_add_device(master->smmu->evtq.iopf, master->dev);
+ if (ret)
+ return ret;
+ master->iopf_refcount = 1;
+ master_domain->using_iopf = true;
+ return 0;
+}
+
+static void arm_smmu_disable_iopf(struct arm_smmu_master *master,
+ struct arm_smmu_master_domain *master_domain)
+{
+ iommu_group_mutex_assert(master->dev);
+
+ if (!IS_ENABLED(CONFIG_ARM_SMMU_V3_SVA))
+ return;
+
+ if (!master_domain || !master_domain->using_iopf)
+ return;
+
+ master->iopf_refcount--;
+ if (master->iopf_refcount == 0)
+ iopf_queue_remove_device(master->smmu->evtq.iopf, master->dev);
+}
+
static void arm_smmu_remove_master_domain(struct arm_smmu_master *master,
struct iommu_domain *domain,
ioasid_t ssid)
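IOPF enablement is now tied to domain attach instead of the removed dev_enable_feat/dev_disable_feat callbacks: the first attach of a fault-handling domain to a stall-capable master registers the device with the event-queue IOPF workqueue, later attaches only bump iopf_refcount, and the final detach unregisters it. A condensed lifetime sketch (the attach/detach calls are illustrative shorthand):

    attach(iopf_domain_A);   /* iopf_refcount 0 -> 1, iopf_queue_add_device()    */
    attach(iopf_domain_B);   /* iopf_refcount 1 -> 2                             */
    detach(iopf_domain_A);   /* iopf_refcount 2 -> 1                             */
    detach(iopf_domain_B);   /* iopf_refcount 1 -> 0, iopf_queue_remove_device() */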
@@ -2772,15 +2826,17 @@ static void arm_smmu_remove_master_domain(struct arm_smmu_master *master,
nested_ats_flush = to_smmu_nested_domain(domain)->enable_ats;
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
- master_domain = arm_smmu_find_master_domain(smmu_domain, master, ssid,
- nested_ats_flush);
+ master_domain = arm_smmu_find_master_domain(smmu_domain, domain, master,
+ ssid, nested_ats_flush);
if (master_domain) {
list_del(&master_domain->devices_elm);
- kfree(master_domain);
if (master->ats_enabled)
atomic_dec(&smmu_domain->nr_ats_masters);
}
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+
+ arm_smmu_disable_iopf(master, master_domain);
+ kfree(master_domain);
}
/*
@@ -2853,12 +2909,19 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
kfree(state->vmaster);
return -ENOMEM;
}
+ master_domain->domain = new_domain;
master_domain->master = master;
master_domain->ssid = state->ssid;
if (new_domain->type == IOMMU_DOMAIN_NESTED)
master_domain->nested_ats_flush =
to_smmu_nested_domain(new_domain)->enable_ats;
+ if (new_domain->iopf_handler) {
+ ret = arm_smmu_enable_iopf(master, master_domain);
+ if (ret)
+ goto err_free_master_domain;
+ }
+
/*
* During prepare we want the current smmu_domain and new
* smmu_domain to be in the devices list before we change any
@@ -2878,9 +2941,9 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
!arm_smmu_master_canwbs(master)) {
spin_unlock_irqrestore(&smmu_domain->devices_lock,
flags);
- kfree(master_domain);
kfree(state->vmaster);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_iopf;
}
if (state->ats_enabled)
@@ -2899,6 +2962,12 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
wmb();
}
return 0;
+
+err_iopf:
+ arm_smmu_disable_iopf(master, master_domain);
+err_free_master_domain:
+ kfree(master_domain);
+ return ret;
}
/*
@@ -2953,7 +3022,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
smmu = master->smmu;
if (smmu_domain->smmu != smmu)
- return ret;
+ return -EINVAL;
if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
cdptr = arm_smmu_alloc_cd_ptr(master, IOMMU_NO_PASID);
@@ -3510,8 +3579,7 @@ static void arm_smmu_release_device(struct device *dev)
{
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
- if (WARN_ON(arm_smmu_master_sva_enabled(master)))
- iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
+ WARN_ON(master->iopf_refcount);
/* Put the STE back to what arm_smmu_init_strtab() sets */
if (dev->iommu->require_direct)
@@ -3586,58 +3654,6 @@ static void arm_smmu_get_resv_regions(struct device *dev,
iommu_dma_get_resv_regions(dev, head);
}
-static int arm_smmu_dev_enable_feature(struct device *dev,
- enum iommu_dev_features feat)
-{
- struct arm_smmu_master *master = dev_iommu_priv_get(dev);
-
- if (!master)
- return -ENODEV;
-
- switch (feat) {
- case IOMMU_DEV_FEAT_IOPF:
- if (!arm_smmu_master_iopf_supported(master))
- return -EINVAL;
- if (master->iopf_enabled)
- return -EBUSY;
- master->iopf_enabled = true;
- return 0;
- case IOMMU_DEV_FEAT_SVA:
- if (!arm_smmu_master_sva_supported(master))
- return -EINVAL;
- if (arm_smmu_master_sva_enabled(master))
- return -EBUSY;
- return arm_smmu_master_enable_sva(master);
- default:
- return -EINVAL;
- }
-}
-
-static int arm_smmu_dev_disable_feature(struct device *dev,
- enum iommu_dev_features feat)
-{
- struct arm_smmu_master *master = dev_iommu_priv_get(dev);
-
- if (!master)
- return -EINVAL;
-
- switch (feat) {
- case IOMMU_DEV_FEAT_IOPF:
- if (!master->iopf_enabled)
- return -EINVAL;
- if (master->sva_enabled)
- return -EBUSY;
- master->iopf_enabled = false;
- return 0;
- case IOMMU_DEV_FEAT_SVA:
- if (!arm_smmu_master_sva_enabled(master))
- return -EINVAL;
- return arm_smmu_master_disable_sva(master);
- default:
- return -EINVAL;
- }
-}
-
/*
* HiSilicon PCIe tune and trace device can be used to trace TLP headers on the
* PCIe link and save the data to memory by DMA. The hardware is restricted to
@@ -3670,8 +3686,6 @@ static struct iommu_ops arm_smmu_ops = {
.device_group = arm_smmu_device_group,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
- .dev_enable_feat = arm_smmu_dev_enable_feature,
- .dev_disable_feat = arm_smmu_dev_disable_feature,
.page_response = arm_smmu_page_response,
.def_domain_type = arm_smmu_def_domain_type,
.viommu_alloc = arm_vsmmu_alloc,
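
Taken together, the arm-smmu-v3 hunks above replace the sva_enabled/iopf_enabled booleans with a per-master reference count that follows domain attach. A hedged sketch of the pattern, using illustrative names rather than the driver's exact API:

struct fault_dev {
	unsigned int refcount;
};

/* Stubs standing in for iopf_queue_add_device()/_remove_device(). */
static int fault_queue_add(struct fault_dev *fd) { return 0; }
static void fault_queue_remove(struct fault_dev *fd) { }

static int iopf_get(struct fault_dev *fd)
{
	int ret;

	if (fd->refcount) {
		fd->refcount++;		/* already on the fault queue */
		return 0;
	}
	ret = fault_queue_add(fd);	/* first faulting attach registers */
	if (ret)
		return ret;
	fd->refcount = 1;
	return 0;
}

static void iopf_put(struct fault_dev *fd)
{
	if (--fd->refcount == 0)
		fault_queue_remove(fd);	/* last faulting detach unregisters */
}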
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index dd1ad56ce863..ea41d790463e 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -837,9 +837,8 @@ struct arm_smmu_master {
bool ats_enabled : 1;
bool ste_ats_enabled : 1;
bool stall_enabled;
- bool sva_enabled;
- bool iopf_enabled;
unsigned int ssid_bits;
+ unsigned int iopf_refcount;
};
/* SMMU private data for an IOMMU domain */
@@ -915,8 +914,14 @@ void arm_smmu_make_sva_cd(struct arm_smmu_cd *target,
struct arm_smmu_master_domain {
struct list_head devices_elm;
struct arm_smmu_master *master;
+ /*
+ * For nested domains the master_domain is threaded onto the S2 parent;
+ * this points to the IOMMU_DOMAIN_NESTED to disambiguate the masters.
+ */
+ struct iommu_domain *domain;
ioasid_t ssid;
bool nested_ats_flush : 1;
+ bool using_iopf : 1;
};
static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
@@ -995,11 +1000,6 @@ int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
#ifdef CONFIG_ARM_SMMU_V3_SVA
bool arm_smmu_sva_supported(struct arm_smmu_device *smmu);
-bool arm_smmu_master_sva_supported(struct arm_smmu_master *master);
-bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master);
-int arm_smmu_master_enable_sva(struct arm_smmu_master *master);
-int arm_smmu_master_disable_sva(struct arm_smmu_master *master);
-bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master);
void arm_smmu_sva_notifier_synchronize(void);
struct iommu_domain *arm_smmu_sva_domain_alloc(struct device *dev,
struct mm_struct *mm);
@@ -1009,31 +1009,6 @@ static inline bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
return false;
}
-static inline bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
-{
- return false;
-}
-
-static inline bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
-{
- return false;
-}
-
-static inline int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
-{
- return -ENODEV;
-}
-
-static inline int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
-{
- return -ENODEV;
-}
-
-static inline bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master)
-{
- return false;
-}
-
static inline void arm_smmu_sva_notifier_synchronize(void) {}
#define arm_smmu_sva_domain_alloc NULL
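
For readers following the header changes: the new domain back-pointer exists because nested domains are threaded onto their S2 parent's device list, so (master, ssid) alone is no longer a unique key. An illustrative lookup over the full key (a sketch, not the driver's exact function):

static struct arm_smmu_master_domain *
find_entry(struct list_head *devices, struct iommu_domain *domain,
	   struct arm_smmu_master *master, ioasid_t ssid)
{
	struct arm_smmu_master_domain *md;

	/* domain disambiguates nested entries sharing one S2 parent */
	list_for_each_entry(md, devices, devices_elm)
		if (md->master == master && md->domain == domain &&
		    md->ssid == ssid)
			return md;
	return NULL;
}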
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
index d03b2239baad..65e0ef6539fe 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
@@ -406,6 +406,12 @@ irqreturn_t qcom_smmu_context_fault(int irq, void *dev)
arm_smmu_print_context_fault_info(smmu, idx, &cfi);
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, cfi.fsr);
+
+ if (cfi.fsr & ARM_SMMU_CB_FSR_SS) {
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME,
+ ret == -EAGAIN ? 0 : ARM_SMMU_RESUME_TERMINATE);
+ }
+
return IRQ_HANDLED;
}
@@ -416,6 +422,9 @@ irqreturn_t qcom_smmu_context_fault(int irq, void *dev)
if (!tmp || tmp == -EBUSY) {
ret = IRQ_HANDLED;
resume = ARM_SMMU_RESUME_TERMINATE;
+ } else if (tmp == -EAGAIN) {
+ ret = IRQ_HANDLED;
+ resume = 0;
} else {
phys_addr_t phys_atos = qcom_smmu_verify_fault(smmu_domain, cfi.iova, cfi.fsr);
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 59d02687280e..62874b18f645 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -112,25 +112,39 @@ static void qcom_adreno_smmu_set_stall(const void *cookie, bool enabled)
{
struct arm_smmu_domain *smmu_domain = (void *)cookie;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
- struct qcom_smmu *qsmmu = to_qcom_smmu(smmu_domain->smmu);
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
+ u32 mask = BIT(cfg->cbndx);
+ bool stall_changed = !!(qsmmu->stall_enabled & mask) != enabled;
+ unsigned long flags;
if (enabled)
- qsmmu->stall_enabled |= BIT(cfg->cbndx);
+ qsmmu->stall_enabled |= mask;
else
- qsmmu->stall_enabled &= ~BIT(cfg->cbndx);
-}
+ qsmmu->stall_enabled &= ~mask;
-static void qcom_adreno_smmu_resume_translation(const void *cookie, bool terminate)
-{
- struct arm_smmu_domain *smmu_domain = (void *)cookie;
- struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
- u32 reg = 0;
+ /*
+ * If the device is on and we changed the setting, update the register.
+ * The spec pseudocode says that CFCFG is resampled after a fault, and
+ * we believe that no implementations cache it in the TLB, so it should
+ * be safe to change it without a TLB invalidation.
+ */
+ if (stall_changed && pm_runtime_get_if_active(smmu->dev) > 0) {
+ u32 reg;
+
+ spin_lock_irqsave(&smmu_domain->cb_lock, flags);
+ reg = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_SCTLR);
+
+ if (enabled)
+ reg |= ARM_SMMU_SCTLR_CFCFG;
+ else
+ reg &= ~ARM_SMMU_SCTLR_CFCFG;
- if (terminate)
- reg |= ARM_SMMU_RESUME_TERMINATE;
+ arm_smmu_cb_write(smmu, cfg->cbndx, ARM_SMMU_CB_SCTLR, reg);
+ spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
- arm_smmu_cb_write(smmu, cfg->cbndx, ARM_SMMU_CB_RESUME, reg);
+ pm_runtime_put_autosuspend(smmu->dev);
+ }
}
static void qcom_adreno_smmu_set_prr_bit(const void *cookie, bool set)
@@ -337,7 +351,6 @@ static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
priv->set_ttbr0_cfg = qcom_adreno_smmu_set_ttbr0_cfg;
priv->get_fault_info = qcom_adreno_smmu_get_fault_info;
priv->set_stall = qcom_adreno_smmu_set_stall;
- priv->resume_translation = qcom_adreno_smmu_resume_translation;
priv->set_prr_bit = NULL;
priv->set_prr_addr = NULL;
@@ -356,6 +369,7 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
{ .compatible = "qcom,mdp4" },
{ .compatible = "qcom,mdss" },
{ .compatible = "qcom,qcm2290-mdss" },
+ { .compatible = "qcom,sar2130p-mdss" },
{ .compatible = "qcom,sc7180-mdss" },
{ .compatible = "qcom,sc7180-mss-pil" },
{ .compatible = "qcom,sc7280-mdss" },
@@ -585,6 +599,7 @@ static const struct arm_smmu_impl qcom_adreno_smmu_v2_impl = {
.alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
.write_sctlr = qcom_adreno_smmu_write_sctlr,
.tlb_sync = qcom_smmu_tlb_sync,
+ .context_fault_needs_threaded_irq = true,
};
static const struct arm_smmu_impl qcom_adreno_smmu_500_impl = {
@@ -594,6 +609,7 @@ static const struct arm_smmu_impl qcom_adreno_smmu_500_impl = {
.alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
.write_sctlr = qcom_adreno_smmu_write_sctlr,
.tlb_sync = qcom_smmu_tlb_sync,
+ .context_fault_needs_threaded_irq = true,
};
static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
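
The rework of qcom_adreno_smmu_set_stall() above updates SCTLR.CFCFG immediately only when the SMMU is powered, instead of deferring everything to the next fault. The underlying runtime-PM pattern, sketched with a hypothetical update_hw() helper:

/* Only touch the register if the device is already active; the
 * reference keeps it powered for the duration of the write. */
if (pm_runtime_get_if_active(dev) > 0) {
	update_hw(dev);			/* hypothetical register update */
	pm_runtime_put_autosuspend(dev);
}
/* Otherwise do nothing: a suspended context bank is reprogrammed
 * from scratch on resume and picks the new value up then. */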
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index 8f439c265a23..8d95b14c7d5a 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -474,6 +474,12 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
arm_smmu_print_context_fault_info(smmu, idx, &cfi);
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, cfi.fsr);
+
+ if (cfi.fsr & ARM_SMMU_CB_FSR_SS) {
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME,
+ ret == -EAGAIN ? 0 : ARM_SMMU_RESUME_TERMINATE);
+ }
+
return IRQ_HANDLED;
}
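
Both context-fault handlers above now complete stalled transactions themselves, which is what allowed qcom_adreno_smmu_resume_translation() to be deleted. The decision, restated in isolation:

/* A stalled transaction (FSR.SS set) must be resumed exactly once:
 * -EAGAIN from the fault consumer requests a retry (RESUME = 0);
 * any other outcome terminates the faulting transaction so the
 * client cannot be left hanging. */
if (cfi.fsr & ARM_SMMU_CB_FSR_SS)
	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME,
			  ret == -EAGAIN ? 0 : ARM_SMMU_RESUME_TERMINATE);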
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 98f7205ec8fb..6c708fec48d1 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -106,7 +106,7 @@ early_param("iommu.forcedac", iommu_dma_forcedac_setup);
struct iova_fq_entry {
unsigned long iova_pfn;
unsigned long pages;
- struct list_head freelist;
+ struct iommu_pages_list freelist;
u64 counter; /* Flush counter when this entry was added */
};
@@ -155,6 +155,8 @@ static void fq_ring_free_locked(struct iommu_dma_cookie *cookie, struct iova_fq
fq->entries[idx].iova_pfn,
fq->entries[idx].pages);
+ fq->entries[idx].freelist =
+ IOMMU_PAGES_LIST_INIT(fq->entries[idx].freelist);
fq->head = (fq->head + 1) & fq->mod_mask;
}
}
@@ -193,7 +195,7 @@ static void fq_flush_timeout(struct timer_list *t)
static void queue_iova(struct iommu_dma_cookie *cookie,
unsigned long pfn, unsigned long pages,
- struct list_head *freelist)
+ struct iommu_pages_list *freelist)
{
struct iova_fq *fq;
unsigned long flags;
@@ -232,7 +234,7 @@ static void queue_iova(struct iommu_dma_cookie *cookie,
fq->entries[idx].iova_pfn = pfn;
fq->entries[idx].pages = pages;
fq->entries[idx].counter = atomic64_read(&cookie->fq_flush_start_cnt);
- list_splice(freelist, &fq->entries[idx].freelist);
+ iommu_pages_list_splice(freelist, &fq->entries[idx].freelist);
spin_unlock_irqrestore(&fq->lock, flags);
@@ -290,7 +292,8 @@ static void iommu_dma_init_one_fq(struct iova_fq *fq, size_t fq_size)
spin_lock_init(&fq->lock);
for (i = 0; i < fq_size; i++)
- INIT_LIST_HEAD(&fq->entries[i].freelist);
+ fq->entries[i].freelist =
+ IOMMU_PAGES_LIST_INIT(fq->entries[i].freelist);
}
static int iommu_dma_init_fq_single(struct iommu_dma_cookie *cookie)
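
The dma-iommu conversion above swaps the open-coded struct page::lru threading for the typed iommu_pages_list added later in this series. A minimal usage sketch of the list API:

struct iommu_pages_list pending = IOMMU_PAGES_LIST_INIT(pending);
void *pt = iommu_alloc_pages_node_sz(NUMA_NO_NODE, GFP_KERNEL, SZ_4K);

if (pt)
	iommu_pages_list_add(&pending, pt);	/* defer freeing */
/* ... once the IOTLB flush has completed ... */
iommu_put_pages_list(&pending);		/* frees every queued page */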
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 317266aca6e2..fcb6a0f7c082 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -902,11 +902,11 @@ static struct iommu_domain *exynos_iommu_domain_alloc_paging(struct device *dev)
if (!domain)
return NULL;
- domain->pgtable = iommu_alloc_pages(GFP_KERNEL, 2);
+ domain->pgtable = iommu_alloc_pages_sz(GFP_KERNEL, SZ_16K);
if (!domain->pgtable)
goto err_pgtable;
- domain->lv2entcnt = iommu_alloc_pages(GFP_KERNEL, 1);
+ domain->lv2entcnt = iommu_alloc_pages_sz(GFP_KERNEL, SZ_8K);
if (!domain->lv2entcnt)
goto err_counter;
@@ -932,9 +932,9 @@ static struct iommu_domain *exynos_iommu_domain_alloc_paging(struct device *dev)
return &domain->domain;
err_lv2ent:
- iommu_free_pages(domain->lv2entcnt, 1);
+ iommu_free_pages(domain->lv2entcnt);
err_counter:
- iommu_free_pages(domain->pgtable, 2);
+ iommu_free_pages(domain->pgtable);
err_pgtable:
kfree(domain);
return NULL;
@@ -975,8 +975,8 @@ static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
phys_to_virt(base));
}
- iommu_free_pages(domain->pgtable, 2);
- iommu_free_pages(domain->lv2entcnt, 1);
+ iommu_free_pages(domain->pgtable);
+ iommu_free_pages(domain->lv2entcnt);
kfree(domain);
}
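
The exynos hunks are representative of the series-wide allocator change: callers pass the size they need rather than a page order, and the free side takes only the pointer. Sketched (SZ_16K was order 2, SZ_8K was order 1):

void *pgtable = iommu_alloc_pages_sz(GFP_KERNEL, SZ_16K);

if (!pgtable)
	return -ENOMEM;
/* ... populate and use the table ... */
iommu_free_pages(pgtable);	/* size is remembered; no order argument */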
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index 30be786bff11..5f08523f97cb 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -64,7 +64,7 @@ static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
spin_lock_irqsave(&iommu_lock, flags);
ret = pamu_update_paace_stash(liodn, val);
if (ret) {
- pr_debug("Failed to update SPAACE for liodn %d\n ", liodn);
+ pr_debug("Failed to update SPAACE for liodn %d\n", liodn);
spin_unlock_irqrestore(&iommu_lock, flags);
return ret;
}
diff --git a/drivers/iommu/intel/Makefile b/drivers/iommu/intel/Makefile
index 6c7528130cf9..ada651c4a01b 100644
--- a/drivers/iommu/intel/Makefile
+++ b/drivers/iommu/intel/Makefile
@@ -1,11 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_DMAR_TABLE) += dmar.o
-obj-$(CONFIG_INTEL_IOMMU) += iommu.o pasid.o nested.o cache.o prq.o
-obj-$(CONFIG_DMAR_TABLE) += trace.o
+obj-y += iommu.o pasid.o nested.o cache.o prq.o
+obj-$(CONFIG_DMAR_TABLE) += dmar.o trace.o
obj-$(CONFIG_DMAR_PERF) += perf.o
obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += debugfs.o
obj-$(CONFIG_INTEL_IOMMU_SVM) += svm.o
-ifdef CONFIG_INTEL_IOMMU
obj-$(CONFIG_IRQ_REMAP) += irq_remapping.o
-endif
obj-$(CONFIG_INTEL_IOMMU_PERF_EVENTS) += perfmon.o
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index e540092d664d..b61d9ea27aa9 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -1099,6 +1099,9 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
spin_lock_init(&iommu->device_rbtree_lock);
mutex_init(&iommu->iopf_lock);
iommu->node = NUMA_NO_NODE;
+ spin_lock_init(&iommu->lock);
+ ida_init(&iommu->domain_ida);
+ mutex_init(&iommu->did_lock);
ver = readl(iommu->reg + DMAR_VER_REG);
pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
@@ -1187,7 +1190,7 @@ static void free_iommu(struct intel_iommu *iommu)
}
if (iommu->qi) {
- iommu_free_page(iommu->qi->desc);
+ iommu_free_pages(iommu->qi->desc);
kfree(iommu->qi->desc_status);
kfree(iommu->qi);
}
@@ -1195,6 +1198,7 @@ static void free_iommu(struct intel_iommu *iommu)
if (iommu->reg)
unmap_iommu(iommu);
+ ida_destroy(&iommu->domain_ida);
ida_free(&dmar_seq_ids, iommu->seq_id);
kfree(iommu);
}
@@ -1681,7 +1685,6 @@ int dmar_enable_qi(struct intel_iommu *iommu)
{
struct q_inval *qi;
void *desc;
- int order;
if (!ecap_qis(iommu->ecap))
return -ENOENT;
@@ -1702,8 +1705,9 @@ int dmar_enable_qi(struct intel_iommu *iommu)
* Need two pages to accommodate 256 descriptors of 256 bits each
* if the remapping hardware supports scalable mode translation.
*/
- order = ecap_smts(iommu->ecap) ? 1 : 0;
- desc = iommu_alloc_pages_node(iommu->node, GFP_ATOMIC, order);
+ desc = iommu_alloc_pages_node_sz(iommu->node, GFP_ATOMIC,
+ ecap_smts(iommu->ecap) ? SZ_8K :
+ SZ_4K);
if (!desc) {
kfree(qi);
iommu->qi = NULL;
@@ -1714,7 +1718,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
qi->desc_status = kcalloc(QI_LENGTH, sizeof(int), GFP_ATOMIC);
if (!qi->desc_status) {
- iommu_free_page(qi->desc);
+ iommu_free_pages(qi->desc);
kfree(qi);
iommu->qi = NULL;
return -ENOMEM;
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index cb0b993bebb4..7aa3932251b2 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -397,7 +397,8 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
if (!alloc)
return NULL;
- context = iommu_alloc_page_node(iommu->node, GFP_ATOMIC);
+ context = iommu_alloc_pages_node_sz(iommu->node, GFP_ATOMIC,
+ SZ_4K);
if (!context)
return NULL;
@@ -571,17 +572,17 @@ static void free_context_table(struct intel_iommu *iommu)
for (i = 0; i < ROOT_ENTRY_NR; i++) {
context = iommu_context_addr(iommu, i, 0, 0);
if (context)
- iommu_free_page(context);
+ iommu_free_pages(context);
if (!sm_supported(iommu))
continue;
context = iommu_context_addr(iommu, i, 0x80, 0);
if (context)
- iommu_free_page(context);
+ iommu_free_pages(context);
}
- iommu_free_page(iommu->root_entry);
+ iommu_free_pages(iommu->root_entry);
iommu->root_entry = NULL;
}
@@ -731,7 +732,8 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
if (!dma_pte_present(pte)) {
uint64_t pteval, tmp;
- tmp_page = iommu_alloc_page_node(domain->nid, gfp);
+ tmp_page = iommu_alloc_pages_node_sz(domain->nid, gfp,
+ SZ_4K);
if (!tmp_page)
return NULL;
@@ -745,7 +747,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
tmp = 0ULL;
if (!try_cmpxchg64(&pte->val, &tmp, pteval))
/* Someone else set it while we were thinking; use theirs. */
- iommu_free_page(tmp_page);
+ iommu_free_pages(tmp_page);
else
domain_flush_cache(domain, pte, sizeof(*pte));
}
@@ -858,7 +860,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level,
last_pfn < level_pfn + level_size(level) - 1)) {
dma_clear_pte(pte);
domain_flush_cache(domain, pte, sizeof(*pte));
- iommu_free_page(level_pte);
+ iommu_free_pages(level_pte);
}
next:
pfn += level_size(level);
@@ -882,7 +884,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
/* free pgd */
if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
- iommu_free_page(domain->pgd);
+ iommu_free_pages(domain->pgd);
domain->pgd = NULL;
}
}
@@ -894,18 +896,16 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
The 'pte' argument is the *parent* PTE, pointing to the page that is to
be freed. */
static void dma_pte_list_pagetables(struct dmar_domain *domain,
- int level, struct dma_pte *pte,
- struct list_head *freelist)
+ int level, struct dma_pte *parent_pte,
+ struct iommu_pages_list *freelist)
{
- struct page *pg;
+ struct dma_pte *pte = phys_to_virt(dma_pte_addr(parent_pte));
- pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
- list_add_tail(&pg->lru, freelist);
+ iommu_pages_list_add(freelist, pte);
if (level == 1)
return;
- pte = page_address(pg);
do {
if (dma_pte_present(pte) && !dma_pte_superpage(pte))
dma_pte_list_pagetables(domain, level - 1, pte, freelist);
@@ -916,7 +916,7 @@ static void dma_pte_list_pagetables(struct dmar_domain *domain,
static void dma_pte_clear_level(struct dmar_domain *domain, int level,
struct dma_pte *pte, unsigned long pfn,
unsigned long start_pfn, unsigned long last_pfn,
- struct list_head *freelist)
+ struct iommu_pages_list *freelist)
{
struct dma_pte *first_pte = NULL, *last_pte = NULL;
@@ -961,7 +961,8 @@ next:
the page tables, and may have cached the intermediate levels. The
pages can only be freed after the IOTLB flush has been done. */
static void domain_unmap(struct dmar_domain *domain, unsigned long start_pfn,
- unsigned long last_pfn, struct list_head *freelist)
+ unsigned long last_pfn,
+ struct iommu_pages_list *freelist)
{
if (WARN_ON(!domain_pfn_supported(domain, last_pfn)) ||
WARN_ON(start_pfn > last_pfn))
@@ -973,8 +974,7 @@ static void domain_unmap(struct dmar_domain *domain, unsigned long start_pfn,
/* free pgd */
if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
- struct page *pgd_page = virt_to_page(domain->pgd);
- list_add_tail(&pgd_page->lru, freelist);
+ iommu_pages_list_add(freelist, domain->pgd);
domain->pgd = NULL;
}
}
@@ -984,7 +984,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
struct root_entry *root;
- root = iommu_alloc_page_node(iommu->node, GFP_ATOMIC);
+ root = iommu_alloc_pages_node_sz(iommu->node, GFP_ATOMIC, SZ_4K);
if (!root) {
pr_err("Allocating root entry for %s failed\n",
iommu->name);
@@ -1289,52 +1289,13 @@ static void iommu_disable_translation(struct intel_iommu *iommu)
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
-static int iommu_init_domains(struct intel_iommu *iommu)
-{
- u32 ndomains;
-
- ndomains = cap_ndoms(iommu->cap);
- pr_debug("%s: Number of Domains supported <%d>\n",
- iommu->name, ndomains);
-
- spin_lock_init(&iommu->lock);
-
- iommu->domain_ids = bitmap_zalloc(ndomains, GFP_KERNEL);
- if (!iommu->domain_ids)
- return -ENOMEM;
-
- /*
- * If Caching mode is set, then invalid translations are tagged
- * with domain-id 0, hence we need to pre-allocate it. We also
- * use domain-id 0 as a marker for non-allocated domain-id, so
- * make sure it is not used for a real domain.
- */
- set_bit(0, iommu->domain_ids);
-
- /*
- * Vt-d spec rev3.0 (section 6.2.3.1) requires that each pasid
- * entry for first-level or pass-through translation modes should
- * be programmed with a domain id different from those used for
- * second-level or nested translation. We reserve a domain id for
- * this purpose. This domain id is also used for identity domain
- * in legacy mode.
- */
- set_bit(FLPT_DEFAULT_DID, iommu->domain_ids);
-
- return 0;
-}
-
static void disable_dmar_iommu(struct intel_iommu *iommu)
{
- if (!iommu->domain_ids)
- return;
-
/*
* All iommu domains must have been detached from the devices,
* hence there should be no domain IDs in use.
*/
- if (WARN_ON(bitmap_weight(iommu->domain_ids, cap_ndoms(iommu->cap))
- > NUM_RESERVED_DID))
+ if (WARN_ON(!ida_is_empty(&iommu->domain_ida)))
return;
if (iommu->gcmd & DMA_GCMD_TE)
@@ -1343,11 +1304,6 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
static void free_dmar_iommu(struct intel_iommu *iommu)
{
- if (iommu->domain_ids) {
- bitmap_free(iommu->domain_ids);
- iommu->domain_ids = NULL;
- }
-
if (iommu->copied_tables) {
bitmap_free(iommu->copied_tables);
iommu->copied_tables = NULL;
@@ -1380,7 +1336,6 @@ static bool first_level_by_default(struct intel_iommu *iommu)
int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
{
struct iommu_domain_info *info, *curr;
- unsigned long ndomains;
int num, ret = -ENOSPC;
if (domain->domain.type == IOMMU_DOMAIN_SVA)
@@ -1390,40 +1345,36 @@ int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
if (!info)
return -ENOMEM;
- spin_lock(&iommu->lock);
+ guard(mutex)(&iommu->did_lock);
curr = xa_load(&domain->iommu_array, iommu->seq_id);
if (curr) {
curr->refcnt++;
- spin_unlock(&iommu->lock);
kfree(info);
return 0;
}
- ndomains = cap_ndoms(iommu->cap);
- num = find_first_zero_bit(iommu->domain_ids, ndomains);
- if (num >= ndomains) {
+ num = ida_alloc_range(&iommu->domain_ida, IDA_START_DID,
+ cap_ndoms(iommu->cap) - 1, GFP_KERNEL);
+ if (num < 0) {
pr_err("%s: No free domain ids\n", iommu->name);
goto err_unlock;
}
- set_bit(num, iommu->domain_ids);
info->refcnt = 1;
info->did = num;
info->iommu = iommu;
curr = xa_cmpxchg(&domain->iommu_array, iommu->seq_id,
- NULL, info, GFP_ATOMIC);
+ NULL, info, GFP_KERNEL);
if (curr) {
ret = xa_err(curr) ? : -EBUSY;
goto err_clear;
}
- spin_unlock(&iommu->lock);
return 0;
err_clear:
- clear_bit(info->did, iommu->domain_ids);
+ ida_free(&iommu->domain_ida, info->did);
err_unlock:
- spin_unlock(&iommu->lock);
kfree(info);
return ret;
}
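
domain_attach_iommu() now allocates domain IDs from an IDA under a sleeping mutex rather than scanning a bitmap under a spinlock, which also lets the xarray insertion use GFP_KERNEL. The allocation step in isolation (names match the patch):

/* IDs 0 and 1 stay reserved (caching-mode tag and FLPT_DEFAULT_DID),
 * so allocation starts at IDA_START_DID and is capped by cap_ndoms(). */
int did = ida_alloc_range(&iommu->domain_ida, IDA_START_DID,
			  cap_ndoms(iommu->cap) - 1, GFP_KERNEL);

if (did < 0)
	return did;	/* no free domain IDs */
/* ... program contexts with did; on the last detach: */
ida_free(&iommu->domain_ida, did);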
@@ -1435,21 +1386,21 @@ void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
if (domain->domain.type == IOMMU_DOMAIN_SVA)
return;
- spin_lock(&iommu->lock);
+ guard(mutex)(&iommu->did_lock);
info = xa_load(&domain->iommu_array, iommu->seq_id);
if (--info->refcnt == 0) {
- clear_bit(info->did, iommu->domain_ids);
+ ida_free(&iommu->domain_ida, info->did);
xa_erase(&domain->iommu_array, iommu->seq_id);
domain->nid = NUMA_NO_NODE;
kfree(info);
}
- spin_unlock(&iommu->lock);
}
static void domain_exit(struct dmar_domain *domain)
{
if (domain->pgd) {
- LIST_HEAD(freelist);
+ struct iommu_pages_list freelist =
+ IOMMU_PAGES_LIST_INIT(freelist);
domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw), &freelist);
iommu_put_pages_list(&freelist);
@@ -1681,9 +1632,8 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
}
attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
- attr |= DMA_FL_PTE_PRESENT;
if (domain->use_first_level) {
- attr |= DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
+ attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
if (prot & DMA_PTE_WRITE)
attr |= DMA_FL_PTE_DIRTY;
}
@@ -1859,6 +1809,7 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
return ret;
info->domain = domain;
+ info->domain_attached = true;
spin_lock_irqsave(&domain->lock, flags);
list_add(&info->link, &domain->devices);
spin_unlock_irqrestore(&domain->lock, flags);
@@ -2027,7 +1978,8 @@ static int copy_context_table(struct intel_iommu *iommu,
if (!old_ce)
goto out;
- new_ce = iommu_alloc_page_node(iommu->node, GFP_KERNEL);
+ new_ce = iommu_alloc_pages_node_sz(iommu->node,
+ GFP_KERNEL, SZ_4K);
if (!new_ce)
goto out_unmap;
@@ -2042,7 +1994,7 @@ static int copy_context_table(struct intel_iommu *iommu,
did = context_domain_id(&ce);
if (did >= 0 && did < cap_ndoms(iommu->cap))
- set_bit(did, iommu->domain_ids);
+ ida_alloc_range(&iommu->domain_ida, did, did, GFP_KERNEL);
set_context_copied(iommu, bus, devfn);
new_ce[idx] = ce;
@@ -2169,11 +2121,6 @@ static int __init init_dmars(void)
}
intel_iommu_init_qi(iommu);
-
- ret = iommu_init_domains(iommu);
- if (ret)
- goto free_iommu;
-
init_translation_status(iommu);
if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
@@ -2651,9 +2598,7 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
if (iommu->gcmd & DMA_GCMD_TE)
iommu_disable_translation(iommu);
- ret = iommu_init_domains(iommu);
- if (ret == 0)
- ret = iommu_alloc_root_entry(iommu);
+ ret = iommu_alloc_root_entry(iommu);
if (ret)
goto out;
@@ -2744,7 +2689,6 @@ static struct dmar_satc_unit *dmar_find_matched_satc_unit(struct pci_dev *dev)
struct device *tmp;
int i;
- dev = pci_physfn(dev);
rcu_read_lock();
list_for_each_entry_rcu(satcu, &dmar_satc_units, list) {
@@ -2761,15 +2705,16 @@ out:
return satcu;
}
-static int dmar_ats_supported(struct pci_dev *dev, struct intel_iommu *iommu)
+static bool dmar_ats_supported(struct pci_dev *dev, struct intel_iommu *iommu)
{
- int i, ret = 1;
- struct pci_bus *bus;
struct pci_dev *bridge = NULL;
- struct device *tmp;
- struct acpi_dmar_atsr *atsr;
struct dmar_atsr_unit *atsru;
struct dmar_satc_unit *satcu;
+ struct acpi_dmar_atsr *atsr;
+ bool supported = true;
+ struct pci_bus *bus;
+ struct device *tmp;
+ int i;
dev = pci_physfn(dev);
satcu = dmar_find_matched_satc_unit(dev);
@@ -2787,11 +2732,11 @@ static int dmar_ats_supported(struct pci_dev *dev, struct intel_iommu *iommu)
bridge = bus->self;
/* If it's an integrated device, allow ATS */
if (!bridge)
- return 1;
+ return true;
/* Connected via non-PCIe: no ATS */
if (!pci_is_pcie(bridge) ||
pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
- return 0;
+ return false;
/* If we found the root port, look it up in the ATSR */
if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
break;
@@ -2810,11 +2755,11 @@ static int dmar_ats_supported(struct pci_dev *dev, struct intel_iommu *iommu)
if (atsru->include_all)
goto out;
}
- ret = 0;
+ supported = false;
out:
rcu_read_unlock();
- return ret;
+ return supported;
}
int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
@@ -2972,9 +2917,14 @@ static ssize_t domains_used_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct intel_iommu *iommu = dev_to_intel_iommu(dev);
- return sysfs_emit(buf, "%d\n",
- bitmap_weight(iommu->domain_ids,
- cap_ndoms(iommu->cap)));
+ unsigned int count = 0;
+ int id;
+
+ for (id = 0; id < cap_ndoms(iommu->cap); id++)
+ if (ida_exists(&iommu->domain_ida, id))
+ count++;
+
+ return sysfs_emit(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(domains_used);
@@ -3257,6 +3207,10 @@ void device_block_translation(struct device *dev)
struct intel_iommu *iommu = info->iommu;
unsigned long flags;
+	/* Device in DMA blocking state. Nothing to do. */
+ if (!info->domain_attached)
+ return;
+
if (info->domain)
cache_tag_unassign_domain(info->domain, dev, IOMMU_NO_PASID);
@@ -3268,6 +3222,9 @@ void device_block_translation(struct device *dev)
domain_context_clear(info);
}
+ /* Device now in DMA blocking state. */
+ info->domain_attached = false;
+
if (!info->domain)
return;
@@ -3282,6 +3239,9 @@ void device_block_translation(struct device *dev)
static int blocking_domain_attach_dev(struct iommu_domain *domain,
struct device *dev)
{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+
+ iopf_for_domain_remove(info->domain ? &info->domain->domain : NULL, dev);
device_block_translation(dev);
return 0;
}
@@ -3360,7 +3320,7 @@ static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_st
domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);
/* always allocate the top pgd */
- domain->pgd = iommu_alloc_page_node(domain->nid, GFP_KERNEL);
+ domain->pgd = iommu_alloc_pages_node_sz(domain->nid, GFP_KERNEL, SZ_4K);
if (!domain->pgd) {
kfree(domain);
return ERR_PTR(-ENOMEM);
@@ -3492,7 +3452,15 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
if (ret)
return ret;
- return dmar_domain_attach_device(to_dmar_domain(domain), dev);
+ ret = iopf_for_domain_set(domain, dev);
+ if (ret)
+ return ret;
+
+ ret = dmar_domain_attach_device(to_dmar_domain(domain), dev);
+ if (ret)
+ iopf_for_domain_remove(domain, dev);
+
+ return ret;
}
static int intel_iommu_map(struct iommu_domain *domain,
@@ -3603,7 +3571,8 @@ static void intel_iommu_tlb_sync(struct iommu_domain *domain,
struct iommu_iotlb_gather *gather)
{
cache_tag_flush_range(to_dmar_domain(domain), gather->start,
- gather->end, list_empty(&gather->freelist));
+ gather->end,
+ iommu_pages_list_empty(&gather->freelist));
iommu_put_pages_list(&gather->freelist);
}
@@ -3918,6 +3887,8 @@ int intel_iommu_enable_iopf(struct device *dev)
if (!info->pri_enabled)
return -ENODEV;
+ /* pri_enabled is protected by the group mutex. */
+ iommu_group_mutex_assert(dev);
if (info->iopf_refcount) {
info->iopf_refcount++;
return 0;
@@ -3940,43 +3911,13 @@ void intel_iommu_disable_iopf(struct device *dev)
if (WARN_ON(!info->pri_enabled || !info->iopf_refcount))
return;
+ iommu_group_mutex_assert(dev);
if (--info->iopf_refcount)
return;
iopf_queue_remove_device(iommu->iopf_queue, dev);
}
-static int
-intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
-{
- switch (feat) {
- case IOMMU_DEV_FEAT_IOPF:
- return intel_iommu_enable_iopf(dev);
-
- case IOMMU_DEV_FEAT_SVA:
- return 0;
-
- default:
- return -ENODEV;
- }
-}
-
-static int
-intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
-{
- switch (feat) {
- case IOMMU_DEV_FEAT_IOPF:
- intel_iommu_disable_iopf(dev);
- return 0;
-
- case IOMMU_DEV_FEAT_SVA:
- return 0;
-
- default:
- return -ENODEV;
- }
-}
-
static bool intel_iommu_is_attach_deferred(struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
@@ -4050,6 +3991,7 @@ static int blocking_domain_set_dev_pasid(struct iommu_domain *domain,
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
+ iopf_for_domain_remove(old, dev);
intel_pasid_tear_down_entry(info->iommu, dev, pasid, false);
domain_remove_dev_pasid(old, dev, pasid);
@@ -4123,6 +4065,10 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
if (IS_ERR(dev_pasid))
return PTR_ERR(dev_pasid);
+ ret = iopf_for_domain_replace(domain, old, dev);
+ if (ret)
+ goto out_remove_dev_pasid;
+
if (dmar_domain->use_first_level)
ret = domain_setup_first_level(iommu, dmar_domain,
dev, pasid, old);
@@ -4130,7 +4076,7 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
ret = domain_setup_second_level(iommu, dmar_domain,
dev, pasid, old);
if (ret)
- goto out_remove_dev_pasid;
+ goto out_unwind_iopf;
domain_remove_dev_pasid(old, dev, pasid);
@@ -4138,6 +4084,8 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
return 0;
+out_unwind_iopf:
+ iopf_for_domain_replace(old, domain, dev);
out_remove_dev_pasid:
domain_remove_dev_pasid(domain, dev, pasid);
return ret;
@@ -4352,11 +4300,19 @@ static int identity_domain_attach_dev(struct iommu_domain *domain, struct device
if (dev_is_real_dma_subdevice(dev))
return 0;
+ /*
+ * No PRI support with the global identity domain. No need to enable or
+ * disable PRI in this path as the iommu has been put in the blocking
+ * state.
+ */
if (sm_supported(iommu))
ret = intel_pasid_setup_pass_through(iommu, dev, IOMMU_NO_PASID);
else
ret = device_setup_pass_through(dev);
+ if (!ret)
+ info->domain_attached = true;
+
return ret;
}
@@ -4371,10 +4327,16 @@ static int identity_domain_set_dev_pasid(struct iommu_domain *domain,
if (!pasid_supported(iommu) || dev_is_real_dma_subdevice(dev))
return -EOPNOTSUPP;
- ret = domain_setup_passthrough(iommu, dev, pasid, old);
+ ret = iopf_for_domain_replace(domain, old, dev);
if (ret)
return ret;
+ ret = domain_setup_passthrough(iommu, dev, pasid, old);
+ if (ret) {
+ iopf_for_domain_replace(old, domain, dev);
+ return ret;
+ }
+
domain_remove_dev_pasid(old, dev, pasid);
return 0;
}
@@ -4401,8 +4363,6 @@ const struct iommu_ops intel_iommu_ops = {
.release_device = intel_iommu_release_device,
.get_resv_regions = intel_iommu_get_resv_regions,
.device_group = intel_iommu_device_group,
- .dev_enable_feat = intel_iommu_dev_enable_feat,
- .dev_disable_feat = intel_iommu_dev_disable_feat,
.is_attach_deferred = intel_iommu_is_attach_deferred,
.def_domain_type = device_def_domain_type,
.pgsize_bitmap = SZ_4K,
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index c4916886da5a..3ddbcc603de2 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -493,14 +493,13 @@ struct q_inval {
/* Page Request Queue depth */
#define PRQ_ORDER 4
-#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x20)
-#define PRQ_DEPTH ((0x1000 << PRQ_ORDER) >> 5)
+#define PRQ_SIZE (SZ_4K << PRQ_ORDER)
+#define PRQ_RING_MASK (PRQ_SIZE - 0x20)
+#define PRQ_DEPTH (PRQ_SIZE >> 5)
struct dmar_pci_notify_info;
#ifdef CONFIG_IRQ_REMAP
-/* 1MB - maximum possible interrupt remapping table size */
-#define INTR_REMAP_PAGE_ORDER 8
#define INTR_REMAP_TABLE_REG_SIZE 0xf
#define INTR_REMAP_TABLE_REG_SIZE_MASK 0xf
@@ -722,7 +721,9 @@ struct intel_iommu {
unsigned char name[16]; /* Device Name */
#ifdef CONFIG_INTEL_IOMMU
- unsigned long *domain_ids; /* bitmap of domains */
+ /* mutex to protect domain_ida */
+ struct mutex did_lock;
+ struct ida domain_ida; /* domain id allocator */
unsigned long *copied_tables; /* bitmap of copied tables */
spinlock_t lock; /* protect context, domain ids */
struct root_entry *root_entry; /* virtual address */
@@ -773,6 +774,7 @@ struct device_domain_info {
u8 ats_supported:1;
u8 ats_enabled:1;
u8 dtlb_extra_inval:1; /* Quirk for devices need extra flush */
+ u8 domain_attached:1; /* Device has domain attached */
u8 ats_qdep;
unsigned int iopf_refcount;
struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
@@ -809,11 +811,22 @@ static inline struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
}
/*
- * Domain ID reserved for pasid entries programmed for first-level
- * only and pass-through transfer modes.
+ * Domain ID 0 and 1 are reserved:
+ *
+ * If Caching mode is set, then invalid translations are tagged
+ * with domain-id 0, hence we need to pre-allocate it. We also
+ * use domain-id 0 as a marker for non-allocated domain-id, so
+ * make sure it is not used for a real domain.
+ *
+ * VT-d spec rev 3.0 (section 6.2.3.1) requires that each pasid
+ * entry for first-level or pass-through translation modes should
+ * be programmed with a domain id different from those used for
+ * second-level or nested translation. We reserve a domain id for
+ * this purpose. This domain id is also used for identity domain
+ * in legacy mode.
*/
#define FLPT_DEFAULT_DID 1
-#define NUM_RESERVED_DID 2
+#define IDA_START_DID 2
/* Retrieve the domain ID which has allocated to the domain */
static inline u16
@@ -1298,6 +1311,39 @@ void intel_iommu_drain_pasid_prq(struct device *dev, u32 pasid);
int intel_iommu_enable_iopf(struct device *dev);
void intel_iommu_disable_iopf(struct device *dev);
+static inline int iopf_for_domain_set(struct iommu_domain *domain,
+ struct device *dev)
+{
+ if (!domain || !domain->iopf_handler)
+ return 0;
+
+ return intel_iommu_enable_iopf(dev);
+}
+
+static inline void iopf_for_domain_remove(struct iommu_domain *domain,
+ struct device *dev)
+{
+ if (!domain || !domain->iopf_handler)
+ return;
+
+ intel_iommu_disable_iopf(dev);
+}
+
+static inline int iopf_for_domain_replace(struct iommu_domain *new,
+ struct iommu_domain *old,
+ struct device *dev)
+{
+ int ret;
+
+ ret = iopf_for_domain_set(new, dev);
+ if (ret)
+ return ret;
+
+ iopf_for_domain_remove(old, dev);
+
+ return 0;
+}
+
#ifdef CONFIG_INTEL_IOMMU_SVM
void intel_svm_check(struct intel_iommu *iommu);
struct iommu_domain *intel_svm_domain_alloc(struct device *dev,
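
The three inline helpers above give every VT-d attach path one ordering rule: enable IOPF for the incoming domain before touching hardware, and disable it for the outgoing domain only after success. A hedged caller-side sketch, where program_hw() is a hypothetical stand-in for the PASID/context programming step:

ret = iopf_for_domain_replace(new, old, dev);	/* enable for new first */
if (ret)
	return ret;

ret = program_hw(dev);				/* hypothetical HW step */
if (ret) {
	/* Swap back in the opposite direction to restore the old
	 * domain's IOPF state before bailing out. */
	iopf_for_domain_replace(old, new, dev);
	return ret;
}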
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index 3bc2a03cceca..cf7b6882ec75 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -530,11 +530,11 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
if (!ir_table)
return -ENOMEM;
- ir_table_base = iommu_alloc_pages_node(iommu->node, GFP_KERNEL,
- INTR_REMAP_PAGE_ORDER);
+ /* 1MB - maximum possible interrupt remapping table size */
+ ir_table_base =
+ iommu_alloc_pages_node_sz(iommu->node, GFP_KERNEL, SZ_1M);
if (!ir_table_base) {
- pr_err("IR%d: failed to allocate pages of order %d\n",
- iommu->seq_id, INTR_REMAP_PAGE_ORDER);
+ pr_err("IR%d: failed to allocate 1M of pages\n", iommu->seq_id);
goto out_free_table;
}
@@ -612,7 +612,7 @@ out_free_fwnode:
out_free_bitmap:
bitmap_free(bitmap);
out_free_pages:
- iommu_free_pages(ir_table_base, INTR_REMAP_PAGE_ORDER);
+ iommu_free_pages(ir_table_base);
out_free_table:
kfree(ir_table);
@@ -633,7 +633,7 @@ static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
irq_domain_free_fwnode(fn);
iommu->ir_domain = NULL;
}
- iommu_free_pages(iommu->ir_table->base, INTR_REMAP_PAGE_ORDER);
+ iommu_free_pages(iommu->ir_table->base);
bitmap_free(iommu->ir_table->bitmap);
kfree(iommu->ir_table);
iommu->ir_table = NULL;
diff --git a/drivers/iommu/intel/nested.c b/drivers/iommu/intel/nested.c
index 6ac5c534bef4..fc312f649f9e 100644
--- a/drivers/iommu/intel/nested.c
+++ b/drivers/iommu/intel/nested.c
@@ -27,8 +27,7 @@ static int intel_nested_attach_dev(struct iommu_domain *domain,
unsigned long flags;
int ret = 0;
- if (info->domain)
- device_block_translation(dev);
+ device_block_translation(dev);
if (iommu->agaw < dmar_domain->s2_domain->agaw) {
dev_err_ratelimited(dev, "Adjusted guest address width not compatible\n");
@@ -56,17 +55,24 @@ static int intel_nested_attach_dev(struct iommu_domain *domain,
if (ret)
goto detach_iommu;
+ ret = iopf_for_domain_set(domain, dev);
+ if (ret)
+ goto unassign_tag;
+
ret = intel_pasid_setup_nested(iommu, dev,
IOMMU_NO_PASID, dmar_domain);
if (ret)
- goto unassign_tag;
+ goto disable_iopf;
info->domain = dmar_domain;
+ info->domain_attached = true;
spin_lock_irqsave(&dmar_domain->lock, flags);
list_add(&info->link, &dmar_domain->devices);
spin_unlock_irqrestore(&dmar_domain->lock, flags);
return 0;
+disable_iopf:
+ iopf_for_domain_remove(domain, dev);
unassign_tag:
cache_tag_unassign_domain(dmar_domain, dev, IOMMU_NO_PASID);
detach_iommu:
@@ -166,14 +172,20 @@ static int intel_nested_set_dev_pasid(struct iommu_domain *domain,
if (IS_ERR(dev_pasid))
return PTR_ERR(dev_pasid);
- ret = domain_setup_nested(iommu, dmar_domain, dev, pasid, old);
+ ret = iopf_for_domain_replace(domain, old, dev);
if (ret)
goto out_remove_dev_pasid;
+ ret = domain_setup_nested(iommu, dmar_domain, dev, pasid, old);
+ if (ret)
+ goto out_unwind_iopf;
+
domain_remove_dev_pasid(old, dev, pasid);
return 0;
+out_unwind_iopf:
+ iopf_for_domain_replace(old, domain, dev);
out_remove_dev_pasid:
domain_remove_dev_pasid(domain, dev, pasid);
return ret;
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index 7ee18bb48bd4..ac67a056b6c8 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -60,14 +60,14 @@ int intel_pasid_alloc_table(struct device *dev)
size = max_pasid >> (PASID_PDE_SHIFT - 3);
order = size ? get_order(size) : 0;
- dir = iommu_alloc_pages_node(info->iommu->node, GFP_KERNEL, order);
+ dir = iommu_alloc_pages_node_sz(info->iommu->node, GFP_KERNEL,
+ 1 << (order + PAGE_SHIFT));
if (!dir) {
kfree(pasid_table);
return -ENOMEM;
}
pasid_table->table = dir;
- pasid_table->order = order;
pasid_table->max_pasid = 1 << (order + PAGE_SHIFT + 3);
info->pasid_table = pasid_table;
@@ -97,10 +97,10 @@ void intel_pasid_free_table(struct device *dev)
max_pde = pasid_table->max_pasid >> PASID_PDE_SHIFT;
for (i = 0; i < max_pde; i++) {
table = get_pasid_table_from_pde(&dir[i]);
- iommu_free_page(table);
+ iommu_free_pages(table);
}
- iommu_free_pages(pasid_table->table, pasid_table->order);
+ iommu_free_pages(pasid_table->table);
kfree(pasid_table);
}
@@ -148,7 +148,8 @@ retry:
if (!entries) {
u64 tmp;
- entries = iommu_alloc_page_node(info->iommu->node, GFP_ATOMIC);
+ entries = iommu_alloc_pages_node_sz(info->iommu->node,
+ GFP_ATOMIC, SZ_4K);
if (!entries)
return NULL;
@@ -161,7 +162,7 @@ retry:
tmp = 0ULL;
if (!try_cmpxchg64(&dir[dir_index].val, &tmp,
(u64)virt_to_phys(entries) | PASID_PTE_PRESENT)) {
- iommu_free_page(entries);
+ iommu_free_pages(entries);
goto retry;
}
if (!ecap_coherent(info->iommu->ecap)) {
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index 668d8ece6b14..fd0fd1a0df84 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -47,7 +47,6 @@ struct pasid_entry {
/* The representative of a PASID table */
struct pasid_table {
void *table; /* pasid table pointer */
- int order; /* page order of pasid table */
u32 max_pasid; /* max pasid */
};
diff --git a/drivers/iommu/intel/prq.c b/drivers/iommu/intel/prq.c
index 5b6a64d96850..52570e42a14c 100644
--- a/drivers/iommu/intel/prq.c
+++ b/drivers/iommu/intel/prq.c
@@ -290,7 +290,8 @@ int intel_iommu_enable_prq(struct intel_iommu *iommu)
struct iopf_queue *iopfq;
int irq, ret;
- iommu->prq = iommu_alloc_pages_node(iommu->node, GFP_KERNEL, PRQ_ORDER);
+ iommu->prq =
+ iommu_alloc_pages_node_sz(iommu->node, GFP_KERNEL, PRQ_SIZE);
if (!iommu->prq) {
pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
iommu->name);
@@ -340,7 +341,7 @@ free_hwirq:
dmar_free_hwirq(irq);
iommu->pr_irq = 0;
free_prq:
- iommu_free_pages(iommu->prq, PRQ_ORDER);
+ iommu_free_pages(iommu->prq);
iommu->prq = NULL;
return ret;
@@ -363,7 +364,7 @@ int intel_iommu_finish_prq(struct intel_iommu *iommu)
iommu->iopf_queue = NULL;
}
- iommu_free_pages(iommu->prq, PRQ_ORDER);
+ iommu_free_pages(iommu->prq);
iommu->prq = NULL;
return 0;
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index ba93123cb4eb..f3da596410b5 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -164,18 +164,23 @@ static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
if (IS_ERR(dev_pasid))
return PTR_ERR(dev_pasid);
+ ret = iopf_for_domain_replace(domain, old, dev);
+ if (ret)
+ goto out_remove_dev_pasid;
+
/* Setup the pasid table: */
sflags = cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0;
ret = __domain_setup_first_level(iommu, dev, pasid,
FLPT_DEFAULT_DID, mm->pgd,
sflags, old);
if (ret)
- goto out_remove_dev_pasid;
+ goto out_unwind_iopf;
domain_remove_dev_pasid(old, dev, pasid);
return 0;
-
+out_unwind_iopf:
+ iopf_for_domain_replace(old, domain, dev);
out_remove_dev_pasid:
domain_remove_dev_pasid(domain, dev, pasid);
return ret;
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 7632c80edea6..96425e92f313 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -13,6 +13,7 @@
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
+#include <linux/device/faux.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -251,8 +252,6 @@ static inline bool arm_lpae_concat_mandatory(struct io_pgtable_cfg *cfg,
(data->start_level == 1) && (oas == 40);
}
-static bool selftest_running = false;
-
static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
return (dma_addr_t)virt_to_phys(pages);
@@ -263,16 +262,20 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
void *cookie)
{
struct device *dev = cfg->iommu_dev;
- int order = get_order(size);
+ size_t alloc_size;
dma_addr_t dma;
void *pages;
- VM_BUG_ON((gfp & __GFP_HIGHMEM));
-
+ /*
+ * For very small starting-level translation tables the HW requires a
+	 * minimum alignment of at least 64 bytes to cover all cases.
+ */
+ alloc_size = max(size, 64);
if (cfg->alloc)
- pages = cfg->alloc(cookie, size, gfp);
+ pages = cfg->alloc(cookie, alloc_size, gfp);
else
- pages = iommu_alloc_pages_node(dev_to_node(dev), gfp, order);
+ pages = iommu_alloc_pages_node_sz(dev_to_node(dev), gfp,
+ alloc_size);
if (!pages)
return NULL;
@@ -300,7 +303,7 @@ out_free:
if (cfg->free)
cfg->free(cookie, pages, size);
else
- iommu_free_pages(pages, order);
+ iommu_free_pages(pages);
return NULL;
}
@@ -316,7 +319,7 @@ static void __arm_lpae_free_pages(void *pages, size_t size,
if (cfg->free)
cfg->free(cookie, pages, size);
else
- iommu_free_pages(pages, get_order(size));
+ iommu_free_pages(pages);
}
static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
@@ -371,7 +374,7 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
for (i = 0; i < num_entries; i++)
if (iopte_leaf(ptep[i], lvl, data->iop.fmt)) {
/* We require an unmap first */
- WARN_ON(!selftest_running);
+ WARN_ON(!(data->iop.cfg.quirks & IO_PGTABLE_QUIRK_NO_WARN));
return -EEXIST;
} else if (iopte_type(ptep[i]) == ARM_LPAE_PTE_TYPE_TABLE) {
/*
@@ -473,7 +476,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
cptep = iopte_deref(pte, data);
} else if (pte) {
/* We require an unmap first */
- WARN_ON(!selftest_running);
+ WARN_ON(!(cfg->quirks & IO_PGTABLE_QUIRK_NO_WARN));
return -EEXIST;
}
@@ -641,8 +644,10 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
ptep += unmap_idx_start;
pte = READ_ONCE(*ptep);
- if (WARN_ON(!pte))
- return 0;
+ if (!pte) {
+ WARN_ON(!(data->iop.cfg.quirks & IO_PGTABLE_QUIRK_NO_WARN));
+		return 0;
+ }
/* If the size matches this level, we're in the right place */
if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
@@ -652,8 +657,10 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
/* Find and handle non-leaf entries */
for (i = 0; i < num_entries; i++) {
pte = READ_ONCE(ptep[i]);
- if (WARN_ON(!pte))
+ if (!pte) {
+ WARN_ON(!(data->iop.cfg.quirks & IO_PGTABLE_QUIRK_NO_WARN));
break;
+ }
if (!iopte_leaf(pte, lvl, iop->fmt)) {
__arm_lpae_clear_pte(&ptep[i], &iop->cfg, 1);
@@ -968,7 +975,8 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
IO_PGTABLE_QUIRK_ARM_TTBR1 |
IO_PGTABLE_QUIRK_ARM_OUTER_WBWA |
- IO_PGTABLE_QUIRK_ARM_HD))
+ IO_PGTABLE_QUIRK_ARM_HD |
+ IO_PGTABLE_QUIRK_NO_WARN))
return NULL;
data = arm_lpae_alloc_pgtable(cfg);
@@ -1069,7 +1077,8 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
struct arm_lpae_io_pgtable *data;
typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;
- if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_S2FWB))
+ if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_S2FWB |
+ IO_PGTABLE_QUIRK_NO_WARN))
return NULL;
data = arm_lpae_alloc_pgtable(cfg);
@@ -1310,7 +1319,6 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
#define __FAIL(ops, i) ({ \
WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \
arm_lpae_dump_ops(ops); \
- selftest_running = false; \
-EFAULT; \
})
@@ -1326,8 +1334,6 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
size_t size, mapped;
struct io_pgtable_ops *ops;
- selftest_running = true;
-
for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
cfg_cookie = cfg;
ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
@@ -1416,7 +1422,6 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
free_io_pgtable_ops(ops);
}
- selftest_running = false;
return 0;
}
@@ -1433,15 +1438,18 @@ static int __init arm_lpae_do_selftests(void)
};
int i, j, k, pass = 0, fail = 0;
- struct device dev;
+ struct faux_device *dev;
struct io_pgtable_cfg cfg = {
.tlb = &dummy_tlb_ops,
.coherent_walk = true,
- .iommu_dev = &dev,
+ .quirks = IO_PGTABLE_QUIRK_NO_WARN,
};
- /* __arm_lpae_alloc_pages() merely needs dev_to_node() to work */
- set_dev_node(&dev, NUMA_NO_NODE);
+ dev = faux_device_create("io-pgtable-test", NULL, 0);
+ if (!dev)
+ return -ENOMEM;
+
+ cfg.iommu_dev = &dev->dev;
for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
for (j = 0; j < ARRAY_SIZE(address_size); ++j) {
@@ -1461,6 +1469,8 @@ static int __init arm_lpae_do_selftests(void)
}
pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
+ faux_device_destroy(dev);
+
return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
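
Two selftest changes above are easy to miss: a faux device supplies a real struct device (so dev_to_node() works without an on-stack fake), and IO_PGTABLE_QUIRK_NO_WARN replaces the selftest_running global for quieting expected-failure warnings. Condensed from the hunks, with error handling elided:

struct faux_device *fdev = faux_device_create("io-pgtable-test", NULL, NULL);

cfg.quirks = IO_PGTABLE_QUIRK_NO_WARN;	/* expected failures stay quiet */
cfg.iommu_dev = &fdev->dev;		/* real device, valid NUMA node */
/* ... run the map/unmap selftests ... */
faux_device_destroy(fdev);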
diff --git a/drivers/iommu/io-pgtable-dart.c b/drivers/iommu/io-pgtable-dart.c
index 06aca9ab52f9..679bda104797 100644
--- a/drivers/iommu/io-pgtable-dart.c
+++ b/drivers/iommu/io-pgtable-dart.c
@@ -107,14 +107,6 @@ static phys_addr_t iopte_to_paddr(dart_iopte pte,
return paddr;
}
-static void *__dart_alloc_pages(size_t size, gfp_t gfp)
-{
- int order = get_order(size);
-
- VM_BUG_ON((gfp & __GFP_HIGHMEM));
- return iommu_alloc_pages(gfp, order);
-}
-
static int dart_init_pte(struct dart_io_pgtable *data,
unsigned long iova, phys_addr_t paddr,
dart_iopte prot, int num_entries,
@@ -256,13 +248,13 @@ static int dart_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
/* no L2 table present */
if (!pte) {
- cptep = __dart_alloc_pages(tblsz, gfp);
+ cptep = iommu_alloc_pages_sz(gfp, tblsz);
if (!cptep)
return -ENOMEM;
pte = dart_install_table(cptep, ptep, 0, data);
if (pte)
- iommu_free_pages(cptep, get_order(tblsz));
+ iommu_free_pages(cptep);
/* L2 table is present (now) */
pte = READ_ONCE(*ptep);
@@ -413,7 +405,8 @@ apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
cfg->apple_dart_cfg.n_ttbrs = 1 << data->tbl_bits;
for (i = 0; i < cfg->apple_dart_cfg.n_ttbrs; ++i) {
- data->pgd[i] = __dart_alloc_pages(DART_GRANULE(data), GFP_KERNEL);
+ data->pgd[i] =
+ iommu_alloc_pages_sz(GFP_KERNEL, DART_GRANULE(data));
if (!data->pgd[i])
goto out_free_data;
cfg->apple_dart_cfg.ttbr[i] = virt_to_phys(data->pgd[i]);
@@ -423,8 +416,7 @@ apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
out_free_data:
while (--i >= 0) {
- iommu_free_pages(data->pgd[i],
- get_order(DART_GRANULE(data)));
+ iommu_free_pages(data->pgd[i]);
}
kfree(data);
return NULL;
@@ -433,7 +425,6 @@ out_free_data:
static void apple_dart_free_pgtable(struct io_pgtable *iop)
{
struct dart_io_pgtable *data = io_pgtable_to_data(iop);
- int order = get_order(DART_GRANULE(data));
dart_iopte *ptep, *end;
int i;
@@ -445,9 +436,9 @@ static void apple_dart_free_pgtable(struct io_pgtable *iop)
dart_iopte pte = *ptep++;
if (pte)
- iommu_free_pages(iopte_deref(pte, data), order);
+ iommu_free_pages(iopte_deref(pte, data));
}
- iommu_free_pages(data->pgd[i], order);
+ iommu_free_pages(data->pgd[i]);
}
kfree(data);
diff --git a/drivers/iommu/iommu-pages.c b/drivers/iommu/iommu-pages.c
new file mode 100644
index 000000000000..238c09e5166b
--- /dev/null
+++ b/drivers/iommu/iommu-pages.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Google LLC.
+ * Pasha Tatashin <pasha.tatashin@soleen.com>
+ */
+#include "iommu-pages.h"
+#include <linux/gfp.h>
+#include <linux/mm.h>
+
+#define IOPTDESC_MATCH(pg_elm, elm) \
+ static_assert(offsetof(struct page, pg_elm) == \
+ offsetof(struct ioptdesc, elm))
+IOPTDESC_MATCH(flags, __page_flags);
+IOPTDESC_MATCH(lru, iopt_freelist_elm); /* Ensure bit 0 is clear */
+IOPTDESC_MATCH(mapping, __page_mapping);
+IOPTDESC_MATCH(private, _private);
+IOPTDESC_MATCH(page_type, __page_type);
+IOPTDESC_MATCH(_refcount, __page_refcount);
+#ifdef CONFIG_MEMCG
+IOPTDESC_MATCH(memcg_data, memcg_data);
+#endif
+#undef IOPTDESC_MATCH
+static_assert(sizeof(struct ioptdesc) <= sizeof(struct page));
+
+/**
+ * iommu_alloc_pages_node_sz - Allocate a zeroed page of a given size from
+ * a specific NUMA node
+ * @nid: memory NUMA node id
+ * @gfp: buddy allocator flags
+ * @size: Memory size to allocate, rounded up to a power of 2
+ *
+ * Returns the virtual address of the allocated page. The page must be freed
+ * either by calling iommu_free_pages() or via iommu_put_pages_list(). The
+ * returned allocation is roundup_pow_of_two(size) bytes and is physically aligned
+ * to its size.
+ */
+void *iommu_alloc_pages_node_sz(int nid, gfp_t gfp, size_t size)
+{
+ unsigned long pgcnt;
+ struct folio *folio;
+ unsigned int order;
+
+ /* This uses page_address() on the memory. */
+ if (WARN_ON(gfp & __GFP_HIGHMEM))
+ return NULL;
+
+ /*
+	 * Currently, sub-page allocations result in a full page being returned.
+ */
+ order = get_order(size);
+
+ /*
+ * __folio_alloc_node() does not handle NUMA_NO_NODE like
+ * alloc_pages_node() did.
+ */
+ if (nid == NUMA_NO_NODE)
+ nid = numa_mem_id();
+
+ folio = __folio_alloc_node(gfp | __GFP_ZERO, order, nid);
+ if (unlikely(!folio))
+ return NULL;
+
+ /*
+	 * All page allocations that should be reported as "iommu-pagetables"
+ * to userspace must use one of the functions below. This includes
+ * allocations of page-tables and other per-iommu_domain configuration
+ * structures.
+ *
+ * This is necessary for the proper accounting as IOMMU state can be
+ * rather large, i.e. multiple gigabytes in size.
+ */
+ pgcnt = 1UL << order;
+ mod_node_page_state(folio_pgdat(folio), NR_IOMMU_PAGES, pgcnt);
+ lruvec_stat_mod_folio(folio, NR_SECONDARY_PAGETABLE, pgcnt);
+
+ return folio_address(folio);
+}
+EXPORT_SYMBOL_GPL(iommu_alloc_pages_node_sz);
+
+static void __iommu_free_desc(struct ioptdesc *iopt)
+{
+ struct folio *folio = ioptdesc_folio(iopt);
+ const unsigned long pgcnt = 1UL << folio_order(folio);
+
+ mod_node_page_state(folio_pgdat(folio), NR_IOMMU_PAGES, -pgcnt);
+ lruvec_stat_mod_folio(folio, NR_SECONDARY_PAGETABLE, -pgcnt);
+ folio_put(folio);
+}
+
+/**
+ * iommu_free_pages - free pages
+ * @virt: virtual address of the page to be freed.
+ *
+ * The page must have been allocated by iommu_alloc_pages_node_sz()
+ */
+void iommu_free_pages(void *virt)
+{
+ if (!virt)
+ return;
+ __iommu_free_desc(virt_to_ioptdesc(virt));
+}
+EXPORT_SYMBOL_GPL(iommu_free_pages);
+
+/**
+ * iommu_put_pages_list - free a list of pages.
+ * @list: The list of pages to be freed
+ *
+ * Frees a list of pages allocated by iommu_alloc_pages_node_sz(). On return the
+ * passed list is invalid; the caller must use IOMMU_PAGES_LIST_INIT to reinit
+ * the list if it expects to use it again.
+ */
+void iommu_put_pages_list(struct iommu_pages_list *list)
+{
+ struct ioptdesc *iopt, *tmp;
+
+ list_for_each_entry_safe(iopt, tmp, &list->pages, iopt_freelist_elm)
+ __iommu_free_desc(iopt);
+}
+EXPORT_SYMBOL_GPL(iommu_put_pages_list);
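
As a whole, the new file gives IOMMU drivers a single accounted allocator. A short lifecycle sketch of the exported API:

/* Allocations are zeroed, rounded up to a power of two, aligned to
 * their size, and accounted to NR_IOMMU_PAGES/NR_SECONDARY_PAGETABLE. */
void *a = iommu_alloc_pages_node_sz(NUMA_NO_NODE, GFP_KERNEL, SZ_4K);
void *b = iommu_alloc_pages_node_sz(NUMA_NO_NODE, GFP_KERNEL, SZ_8K);
struct iommu_pages_list batch = IOMMU_PAGES_LIST_INIT(batch);

iommu_free_pages(a);			/* immediate free */
iommu_pages_list_add(&batch, b);	/* queue for batched free */
iommu_put_pages_list(&batch);		/* frees b; re-init before reuse */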
diff --git a/drivers/iommu/iommu-pages.h b/drivers/iommu/iommu-pages.h
index 82ebf0033081..b3af2813ed0c 100644
--- a/drivers/iommu/iommu-pages.h
+++ b/drivers/iommu/iommu-pages.h
@@ -7,180 +7,95 @@
#ifndef __IOMMU_PAGES_H
#define __IOMMU_PAGES_H
-#include <linux/vmstat.h>
-#include <linux/gfp.h>
-#include <linux/mm.h>
-
-/*
- * All page allocations that should be reported to as "iommu-pagetables" to
- * userspace must use one of the functions below. This includes allocations of
- * page-tables and other per-iommu_domain configuration structures.
- *
- * This is necessary for the proper accounting as IOMMU state can be rather
- * large, i.e. multiple gigabytes in size.
- */
-
-/**
- * __iommu_alloc_account - account for newly allocated page.
- * @page: head struct page of the page.
- * @order: order of the page
- */
-static inline void __iommu_alloc_account(struct page *page, int order)
-{
- const long pgcnt = 1l << order;
-
- mod_node_page_state(page_pgdat(page), NR_IOMMU_PAGES, pgcnt);
- mod_lruvec_page_state(page, NR_SECONDARY_PAGETABLE, pgcnt);
-}
-
-/**
- * __iommu_free_account - account a page that is about to be freed.
- * @page: head struct page of the page.
- * @order: order of the page
- */
-static inline void __iommu_free_account(struct page *page, int order)
-{
- const long pgcnt = 1l << order;
-
- mod_node_page_state(page_pgdat(page), NR_IOMMU_PAGES, -pgcnt);
- mod_lruvec_page_state(page, NR_SECONDARY_PAGETABLE, -pgcnt);
-}
+#include <linux/iommu.h>
/**
- * __iommu_alloc_pages - allocate a zeroed page of a given order.
- * @gfp: buddy allocator flags
- * @order: page order
+ * struct ioptdesc - Memory descriptor for IOMMU page tables
+ * @iopt_freelist_elm: List element for a struct iommu_pages_list
*
- * returns the head struct page of the allocated page.
+ * This struct overlays struct page for now. Do not modify without a good
+ * understanding of the issues.
*/
-static inline struct page *__iommu_alloc_pages(gfp_t gfp, int order)
+struct ioptdesc {
+ unsigned long __page_flags;
+
+ struct list_head iopt_freelist_elm;
+ unsigned long __page_mapping;
+ pgoff_t __index;
+ void *_private;
+
+ unsigned int __page_type;
+ atomic_t __page_refcount;
+#ifdef CONFIG_MEMCG
+ unsigned long memcg_data;
+#endif
+};
+
+static inline struct ioptdesc *folio_ioptdesc(struct folio *folio)
{
- struct page *page;
-
- page = alloc_pages(gfp | __GFP_ZERO, order);
- if (unlikely(!page))
- return NULL;
-
- __iommu_alloc_account(page, order);
-
- return page;
+ return (struct ioptdesc *)folio;
}
-/**
- * __iommu_free_pages - free page of a given order
- * @page: head struct page of the page
- * @order: page order
- */
-static inline void __iommu_free_pages(struct page *page, int order)
+static inline struct folio *ioptdesc_folio(struct ioptdesc *iopt)
{
- if (!page)
- return;
-
- __iommu_free_account(page, order);
- __free_pages(page, order);
+ return (struct folio *)iopt;
}
-/**
- * iommu_alloc_pages_node - allocate a zeroed page of a given order from
- * specific NUMA node.
- * @nid: memory NUMA node id
- * @gfp: buddy allocator flags
- * @order: page order
- *
- * returns the virtual address of the allocated page
- */
-static inline void *iommu_alloc_pages_node(int nid, gfp_t gfp, int order)
+static inline struct ioptdesc *virt_to_ioptdesc(void *virt)
{
- struct page *page = alloc_pages_node(nid, gfp | __GFP_ZERO, order);
-
- if (unlikely(!page))
- return NULL;
-
- __iommu_alloc_account(page, order);
-
- return page_address(page);
+ return folio_ioptdesc(virt_to_folio(virt));
}
-/**
- * iommu_alloc_pages - allocate a zeroed page of a given order
- * @gfp: buddy allocator flags
- * @order: page order
- *
- * returns the virtual address of the allocated page
- */
-static inline void *iommu_alloc_pages(gfp_t gfp, int order)
-{
- struct page *page = __iommu_alloc_pages(gfp, order);
-
- if (unlikely(!page))
- return NULL;
-
- return page_address(page);
-}
+void *iommu_alloc_pages_node_sz(int nid, gfp_t gfp, size_t size);
+void iommu_free_pages(void *virt);
+void iommu_put_pages_list(struct iommu_pages_list *list);
/**
- * iommu_alloc_page_node - allocate a zeroed page at specific NUMA node.
- * @nid: memory NUMA node id
- * @gfp: buddy allocator flags
- *
- * returns the virtual address of the allocated page
+ * iommu_pages_list_add - add the page to an iommu_pages_list
+ * @list: List to add the page to
+ * @virt: Address returned from iommu_alloc_pages_node_sz()
*/
-static inline void *iommu_alloc_page_node(int nid, gfp_t gfp)
+static inline void iommu_pages_list_add(struct iommu_pages_list *list,
+ void *virt)
{
- return iommu_alloc_pages_node(nid, gfp, 0);
+ list_add_tail(&virt_to_ioptdesc(virt)->iopt_freelist_elm, &list->pages);
}
/**
- * iommu_alloc_page - allocate a zeroed page
- * @gfp: buddy allocator flags
+ * iommu_pages_list_splice - Move all the pages from list @from into list @to
+ * @from: Source list of pages
+ * @to: Destination list of pages
*
- * returns the virtual address of the allocated page
+ * @from must be re-initialized after calling this function if it is to be
+ * used again.
*/
-static inline void *iommu_alloc_page(gfp_t gfp)
+static inline void iommu_pages_list_splice(struct iommu_pages_list *from,
+ struct iommu_pages_list *to)
{
- return iommu_alloc_pages(gfp, 0);
+ list_splice(&from->pages, &to->pages);
}
/**
- * iommu_free_pages - free page of a given order
- * @virt: virtual address of the page to be freed.
- * @order: page order
+ * iommu_pages_list_empty - True if the list is empty
+ * @list: List to check
*/
-static inline void iommu_free_pages(void *virt, int order)
+static inline bool iommu_pages_list_empty(struct iommu_pages_list *list)
{
- if (!virt)
- return;
-
- __iommu_free_pages(virt_to_page(virt), order);
+ return list_empty(&list->pages);
}
/**
- * iommu_free_page - free page
- * @virt: virtual address of the page to be freed.
- */
-static inline void iommu_free_page(void *virt)
-{
- iommu_free_pages(virt, 0);
-}
-
-/**
- * iommu_put_pages_list - free a list of pages.
- * @page: the head of the lru list to be freed.
+ * iommu_alloc_pages_sz - Allocate a zeroed page of a given size
+ * @gfp: buddy allocator flags
+ * @size: Memory size to allocate; this is rounded up to a power of 2
*
- * There are no locking requirement for these pages, as they are going to be
- * put on a free list as soon as refcount reaches 0. Pages are put on this LRU
- * list once they are removed from the IOMMU page tables. However, they can
- * still be access through debugfs.
+ * Returns the virtual address of the allocated page, or NULL on failure.
*/
-static inline void iommu_put_pages_list(struct list_head *page)
+static inline void *iommu_alloc_pages_sz(gfp_t gfp, size_t size)
{
- while (!list_empty(page)) {
- struct page *p = list_entry(page->prev, struct page, lru);
-
- list_del(&p->lru);
- __iommu_free_account(p, 0);
- put_page(p);
- }
+ return iommu_alloc_pages_node_sz(NUMA_NO_NODE, gfp, size);
}
#endif /* __IOMMU_PAGES_H */
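A sketch of the deferred-free pattern these list helpers enable, assuming a hypothetical unmap path that must flush the IOTLB before returning page-table memory to the allocator:

	struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist);
	void *pt;

	pt = iommu_alloc_pages_sz(GFP_ATOMIC, SZ_4K);
	if (pt)
		iommu_pages_list_add(&freelist, pt);	/* defer the free */

	/* ... flush the IOTLB so the hardware holds no stale references ... */

	if (!iommu_pages_list_empty(&freelist))
		iommu_put_pages_list(&freelist);	/* frees everything gathered */
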
diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c
index ab18bc494eef..1a51cfd82808 100644
--- a/drivers/iommu/iommu-sva.c
+++ b/drivers/iommu/iommu-sva.c
@@ -63,9 +63,6 @@ static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct de
* reference is taken. Caller must call iommu_sva_unbind_device()
* to release each reference.
*
- * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
- * initialize the required SVA features.
- *
* On error, returns an ERR_PTR value.
*/
struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
@@ -299,15 +296,12 @@ static struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
const struct iommu_ops *ops = dev_iommu_ops(dev);
struct iommu_domain *domain;
- if (ops->domain_alloc_sva) {
- domain = ops->domain_alloc_sva(dev, mm);
- if (IS_ERR(domain))
- return domain;
- } else {
- domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
- if (!domain)
- return ERR_PTR(-ENOMEM);
- }
+ if (!ops->domain_alloc_sva)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ domain = ops->domain_alloc_sva(dev, mm);
+ if (IS_ERR(domain))
+ return domain;
domain->type = IOMMU_DOMAIN_SVA;
domain->cookie_type = IOMMU_COOKIE_SVA;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 6c02f93422ce..a4b606c591da 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -277,6 +277,8 @@ int iommu_device_register(struct iommu_device *iommu,
err = bus_iommu_probe(iommu_buses[i]);
if (err)
iommu_device_unregister(iommu);
+ else
+ WRITE_ONCE(iommu->ready, true);
return err;
}
EXPORT_SYMBOL_GPL(iommu_device_register);
@@ -422,13 +424,15 @@ static int iommu_init_device(struct device *dev)
* is buried in the bus dma_configure path. Properly unpicking that is
* still a big job, so for now just invoke the whole thing. The device
* already having a driver bound means dma_configure has already run and
- * either found no IOMMU to wait for, or we're in its replay call right
- * now, so either way there's no point calling it again.
+ * found no IOMMU to wait for, so there's no point calling it again.
*/
- if (!dev->driver && dev->bus->dma_configure) {
+ if (!dev->iommu->fwspec && !dev->driver && dev->bus->dma_configure) {
mutex_unlock(&iommu_probe_device_lock);
dev->bus->dma_configure(dev);
mutex_lock(&iommu_probe_device_lock);
+ /* If another instance finished the job for us, skip it */
+ if (!dev->iommu || dev->iommu_group)
+ return -ENODEV;
}
/*
* At this point, relevant devices either now have a fwspec which will
@@ -1629,15 +1633,13 @@ static struct iommu_domain *__iommu_alloc_identity_domain(struct device *dev)
if (ops->identity_domain)
return ops->identity_domain;
- /* Older drivers create the identity domain via ops->domain_alloc() */
- if (!ops->domain_alloc)
+ if (ops->domain_alloc_identity) {
+ domain = ops->domain_alloc_identity(dev);
+ if (IS_ERR(domain))
+ return domain;
+ } else {
return ERR_PTR(-EOPNOTSUPP);
-
- domain = ops->domain_alloc(IOMMU_DOMAIN_IDENTITY);
- if (IS_ERR(domain))
- return domain;
- if (!domain)
- return ERR_PTR(-ENOMEM);
+ }
iommu_domain_init(domain, IOMMU_DOMAIN_IDENTITY, ops);
return domain;
@@ -2025,8 +2027,10 @@ __iommu_paging_domain_alloc_flags(struct device *dev, unsigned int type,
domain = ops->domain_alloc_paging(dev);
else if (ops->domain_alloc_paging_flags)
domain = ops->domain_alloc_paging_flags(dev, flags, NULL);
+#if IS_ENABLED(CONFIG_FSL_PAMU)
else if (ops->domain_alloc && !flags)
domain = ops->domain_alloc(IOMMU_DOMAIN_UNMANAGED);
+#endif
else
return ERR_PTR(-EOPNOTSUPP);
@@ -2204,6 +2208,19 @@ static void *iommu_make_pasid_array_entry(struct iommu_domain *domain,
return xa_tag_pointer(domain, IOMMU_PASID_ARRAY_DOMAIN);
}
+static bool domain_iommu_ops_compatible(const struct iommu_ops *ops,
+ struct iommu_domain *domain)
+{
+ if (domain->owner == ops)
+ return true;
+
+ /* For static domains, owner isn't set. */
+ if (domain == ops->blocked_domain || domain == ops->identity_domain)
+ return true;
+
+ return false;
+}
+
static int __iommu_attach_group(struct iommu_domain *domain,
struct iommu_group *group)
{
@@ -2214,7 +2231,8 @@ static int __iommu_attach_group(struct iommu_domain *domain,
return -EBUSY;
dev = iommu_group_first_dev(group);
- if (!dev_has_iommu(dev) || dev_iommu_ops(dev) != domain->owner)
+ if (!dev_has_iommu(dev) ||
+ !domain_iommu_ops_compatible(dev_iommu_ops(dev), domain))
return -EINVAL;
return __iommu_group_set_domain(group, domain);
@@ -2395,6 +2413,7 @@ static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
unsigned int pgsize_idx, pgsize_idx_next;
unsigned long pgsizes;
size_t offset, pgsize, pgsize_next;
+ size_t offset_end;
unsigned long addr_merge = paddr | iova;
/* Page sizes supported by the hardware and small enough for @size */
@@ -2435,7 +2454,8 @@ static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
* If size is big enough to accommodate the larger page, reduce
* the number of smaller pages.
*/
- if (offset + pgsize_next <= size)
+ if (!check_add_overflow(offset, pgsize_next, &offset_end) &&
+ offset_end <= size)
size = offset;
out_set_count:
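Why the merge test needs check_add_overflow(): a self-contained sketch using the compiler builtin that the kernel macro wraps. With a plain sum, an offset near SIZE_MAX wraps around and the old `offset + pgsize_next <= size` comparison passes spuriously:

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>

	int main(void)
	{
		size_t offset = SIZE_MAX - 0x800, pgsize_next = 0x1000, end;

		/* The wrapped sum looks tiny, so the naive test would accept it: */
		assert(offset + pgsize_next == 0x7ff);
		/* __builtin_add_overflow() (check_add_overflow() in the kernel)
		 * reports the wrap instead: */
		assert(__builtin_add_overflow(offset, pgsize_next, &end));
		return 0;
	}
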
@@ -2842,31 +2862,39 @@ bool iommu_default_passthrough(void)
}
EXPORT_SYMBOL_GPL(iommu_default_passthrough);
-const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode)
+static const struct iommu_device *iommu_from_fwnode(const struct fwnode_handle *fwnode)
{
- const struct iommu_ops *ops = NULL;
- struct iommu_device *iommu;
+ const struct iommu_device *iommu, *ret = NULL;
spin_lock(&iommu_device_lock);
list_for_each_entry(iommu, &iommu_device_list, list)
if (iommu->fwnode == fwnode) {
- ops = iommu->ops;
+ ret = iommu;
break;
}
spin_unlock(&iommu_device_lock);
- return ops;
+ return ret;
+}
+
+const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode)
+{
+ const struct iommu_device *iommu = iommu_from_fwnode(fwnode);
+
+ return iommu ? iommu->ops : NULL;
}
int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode)
{
- const struct iommu_ops *ops = iommu_ops_from_fwnode(iommu_fwnode);
+ const struct iommu_device *iommu = iommu_from_fwnode(iommu_fwnode);
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- if (!ops)
+ if (!iommu)
return driver_deferred_probe_check_state(dev);
+ if (!dev->iommu && !READ_ONCE(iommu->ready))
+ return -EPROBE_DEFER;
if (fwspec)
- return ops == iommu_fwspec_ops(fwspec) ? 0 : -EINVAL;
+ return iommu->ops == iommu_fwspec_ops(fwspec) ? 0 : -EINVAL;
if (!dev_iommu_get(dev))
return -ENOMEM;
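The new ready flag pairs the WRITE_ONCE() publish in iommu_device_register() with the READ_ONCE() consume above. A hedged sketch of what a client sees (demo_* names and the fwnode are hypothetical): probing against a registered-but-not-ready IOMMU now defers instead of racing a half-initialized instance:

	static int demo_client_probe(struct platform_device *pdev)
	{
		int rc = iommu_fwspec_init(&pdev->dev, demo_iommu_fwnode);

		if (rc == -EPROBE_DEFER)
			return rc;	/* driver core retries once the IOMMU is ready */
		if (rc)
			return rc;
		/* ... fwspec is valid, continue binding ... */
		return 0;
	}
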
@@ -2920,38 +2948,6 @@ int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids)
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
-/*
- * Per device IOMMU features.
- */
-int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
-{
- if (dev_has_iommu(dev)) {
- const struct iommu_ops *ops = dev_iommu_ops(dev);
-
- if (ops->dev_enable_feat)
- return ops->dev_enable_feat(dev, feat);
- }
-
- return -ENODEV;
-}
-EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
-
-/*
- * The device drivers should do the necessary cleanups before calling this.
- */
-int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
-{
- if (dev_has_iommu(dev)) {
- const struct iommu_ops *ops = dev_iommu_ops(dev);
-
- if (ops->dev_disable_feat)
- return ops->dev_disable_feat(dev, feat);
- }
-
- return -EBUSY;
-}
-EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
-
/**
* iommu_setup_default_domain - Set the default_domain for the group
* @group: Group to change
@@ -3454,7 +3450,8 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
!ops->blocked_domain->ops->set_dev_pasid)
return -EOPNOTSUPP;
- if (ops != domain->owner || pasid == IOMMU_NO_PASID)
+ if (!domain_iommu_ops_compatible(ops, domain) ||
+ pasid == IOMMU_NO_PASID)
return -EINVAL;
mutex_lock(&group->mutex);
@@ -3536,7 +3533,7 @@ int iommu_replace_device_pasid(struct iommu_domain *domain,
if (!domain->ops->set_dev_pasid)
return -EOPNOTSUPP;
- if (dev_iommu_ops(dev) != domain->owner ||
+ if (!domain_iommu_ops_compatible(dev_iommu_ops(dev), domain) ||
pasid == IOMMU_NO_PASID || !handle)
return -EINVAL;
diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
index 2111bad72c72..86244403b532 100644
--- a/drivers/iommu/iommufd/device.c
+++ b/drivers/iommu/iommufd/device.c
@@ -221,7 +221,6 @@ struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
refcount_inc(&idev->obj.users);
/* igroup refcount moves into iommufd_device */
idev->igroup = igroup;
- mutex_init(&idev->iopf_lock);
/*
* If the caller fails after this success it must call
@@ -425,6 +424,25 @@ static int iommufd_hwpt_pasid_compat(struct iommufd_hw_pagetable *hwpt,
return 0;
}
+static bool iommufd_hwpt_compatible_device(struct iommufd_hw_pagetable *hwpt,
+ struct iommufd_device *idev)
+{
+ struct pci_dev *pdev;
+
+ if (!hwpt->fault || !dev_is_pci(idev->dev))
+ return true;
+
+ /*
+ * Once we turn on PCI/PRI support for VF, the response failure code
+ * should not be forwarded to the hardware due to PRI being a shared
+ * resource between PF and VFs. There is no coordination for this
+ * shared capability. This waits for a vPRI reset to recover.
+ */
+ pdev = to_pci_dev(idev->dev);
+
+ return (!pdev->is_virtfn || !pci_pri_supported(pdev));
+}
+
static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
struct iommufd_device *idev,
ioasid_t pasid)
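The compatibility rule above condenses to a small truth table for a fault-capable hwpt on a PCI device (non-PCI devices and non-fault hwpts always pass):

	/*
	 *   PF,  PRI supported or not -> attach allowed
	 *   VF,  PRI unsupported      -> attach allowed
	 *   VF,  PRI supported        -> -EINVAL: PRI is shared between the PF
	 *                                and its VFs, so a failure response from
	 *                                one function would affect all of them
	 */
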
@@ -432,6 +450,9 @@ static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
struct iommufd_attach_handle *handle;
int rc;
+ if (!iommufd_hwpt_compatible_device(hwpt, idev))
+ return -EINVAL;
+
rc = iommufd_hwpt_pasid_compat(hwpt, idev, pasid);
if (rc)
return rc;
@@ -440,12 +461,6 @@ static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
if (!handle)
return -ENOMEM;
- if (hwpt->fault) {
- rc = iommufd_fault_iopf_enable(idev);
- if (rc)
- goto out_free_handle;
- }
-
handle->idev = idev;
if (pasid == IOMMU_NO_PASID)
rc = iommu_attach_group_handle(hwpt->domain, idev->igroup->group,
@@ -454,13 +469,10 @@ static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
rc = iommu_attach_device_pasid(hwpt->domain, idev->dev, pasid,
&handle->handle);
if (rc)
- goto out_disable_iopf;
+ goto out_free_handle;
return 0;
-out_disable_iopf:
- if (hwpt->fault)
- iommufd_fault_iopf_disable(idev);
out_free_handle:
kfree(handle);
return rc;
@@ -492,10 +504,7 @@ static void iommufd_hwpt_detach_device(struct iommufd_hw_pagetable *hwpt,
else
iommu_detach_device_pasid(hwpt->domain, idev->dev, pasid);
- if (hwpt->fault) {
- iommufd_auto_response_faults(hwpt, handle);
- iommufd_fault_iopf_disable(idev);
- }
+ iommufd_auto_response_faults(hwpt, handle);
kfree(handle);
}
@@ -507,6 +516,9 @@ static int iommufd_hwpt_replace_device(struct iommufd_device *idev,
struct iommufd_attach_handle *handle, *old_handle;
int rc;
+ if (!iommufd_hwpt_compatible_device(hwpt, idev))
+ return -EINVAL;
+
rc = iommufd_hwpt_pasid_compat(hwpt, idev, pasid);
if (rc)
return rc;
@@ -517,12 +529,6 @@ static int iommufd_hwpt_replace_device(struct iommufd_device *idev,
if (!handle)
return -ENOMEM;
- if (hwpt->fault && !old->fault) {
- rc = iommufd_fault_iopf_enable(idev);
- if (rc)
- goto out_free_handle;
- }
-
handle->idev = idev;
if (pasid == IOMMU_NO_PASID)
rc = iommu_replace_group_handle(idev->igroup->group,
@@ -531,20 +537,13 @@ static int iommufd_hwpt_replace_device(struct iommufd_device *idev,
rc = iommu_replace_device_pasid(hwpt->domain, idev->dev,
pasid, &handle->handle);
if (rc)
- goto out_disable_iopf;
+ goto out_free_handle;
- if (old->fault) {
- iommufd_auto_response_faults(hwpt, old_handle);
- if (!hwpt->fault)
- iommufd_fault_iopf_disable(idev);
- }
+ iommufd_auto_response_faults(hwpt, old_handle);
kfree(old_handle);
return 0;
-out_disable_iopf:
- if (hwpt->fault && !old->fault)
- iommufd_fault_iopf_disable(idev);
out_free_handle:
kfree(handle);
return rc;
diff --git a/drivers/iommu/iommufd/eventq.c b/drivers/iommu/iommufd/eventq.c
index f39cf0797347..e373b9eec7f5 100644
--- a/drivers/iommu/iommufd/eventq.c
+++ b/drivers/iommu/iommufd/eventq.c
@@ -9,8 +9,6 @@
#include <linux/iommufd.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/pci.h>
-#include <linux/pci-ats.h>
#include <linux/poll.h>
#include <uapi/linux/iommufd.h>
@@ -18,50 +16,6 @@
#include "iommufd_private.h"
/* IOMMUFD_OBJ_FAULT Functions */
-
-int iommufd_fault_iopf_enable(struct iommufd_device *idev)
-{
- struct device *dev = idev->dev;
- int ret;
-
- /*
- * Once we turn on PCI/PRI support for VF, the response failure code
- * should not be forwarded to the hardware due to PRI being a shared
- * resource between PF and VFs. There is no coordination for this
- * shared capability. This waits for a vPRI reset to recover.
- */
- if (dev_is_pci(dev)) {
- struct pci_dev *pdev = to_pci_dev(dev);
-
- if (pdev->is_virtfn && pci_pri_supported(pdev))
- return -EINVAL;
- }
-
- mutex_lock(&idev->iopf_lock);
- /* Device iopf has already been on. */
- if (++idev->iopf_enabled > 1) {
- mutex_unlock(&idev->iopf_lock);
- return 0;
- }
-
- ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_IOPF);
- if (ret)
- --idev->iopf_enabled;
- mutex_unlock(&idev->iopf_lock);
-
- return ret;
-}
-
-void iommufd_fault_iopf_disable(struct iommufd_device *idev)
-{
- mutex_lock(&idev->iopf_lock);
- if (!WARN_ON(idev->iopf_enabled == 0)) {
- if (--idev->iopf_enabled == 0)
- iommu_dev_disable_feature(idev->dev, IOMMU_DEV_FEAT_IOPF);
- }
- mutex_unlock(&idev->iopf_lock);
-}
-
void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
struct iommufd_attach_handle *handle)
{
@@ -70,7 +24,7 @@ void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
struct list_head free_list;
unsigned long index;
- if (!fault)
+ if (!fault || !handle)
return;
INIT_LIST_HEAD(&free_list);
diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index 80e8c76d25f2..9ccc83341f32 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -425,9 +425,6 @@ struct iommufd_device {
/* always the physical device */
struct device *dev;
bool enforce_cache_coherency;
- /* protect iopf_enabled counter */
- struct mutex iopf_lock;
- unsigned int iopf_enabled;
};
static inline struct iommufd_device *
@@ -506,9 +503,6 @@ iommufd_get_fault(struct iommufd_ucmd *ucmd, u32 id)
int iommufd_fault_alloc(struct iommufd_ucmd *ucmd);
void iommufd_fault_destroy(struct iommufd_object *obj);
int iommufd_fault_iopf_handler(struct iopf_group *group);
-
-int iommufd_fault_iopf_enable(struct iommufd_device *idev);
-void iommufd_fault_iopf_disable(struct iommufd_device *idev);
void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
struct iommufd_attach_handle *handle);
diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c
index 18d9a216eb30..6bd0abf9a641 100644
--- a/drivers/iommu/iommufd/selftest.c
+++ b/drivers/iommu/iommufd/selftest.c
@@ -58,6 +58,9 @@ enum {
MOCK_PFN_HUGE_IOVA = _MOCK_PFN_START << 2,
};
+static int mock_dev_enable_iopf(struct device *dev, struct iommu_domain *domain);
+static void mock_dev_disable_iopf(struct device *dev, struct iommu_domain *domain);
+
/*
* Syzkaller has trouble randomizing the correct iova to use since it is linked
* to the map ioctl's output, and it has no idea about that. So, simplify things.
@@ -168,6 +171,8 @@ struct mock_dev {
int id;
u32 cache[MOCK_DEV_CACHE_NUM];
atomic_t pasid_1024_fake_error;
+ unsigned int iopf_refcount;
+ struct iommu_domain *domain;
};
static inline struct mock_dev *to_mock_dev(struct device *dev)
@@ -221,6 +226,13 @@ static int mock_domain_nop_attach(struct iommu_domain *domain,
up_write(&mdev->viommu_rwsem);
}
+ rc = mock_dev_enable_iopf(dev, domain);
+ if (rc)
+ return rc;
+
+ mock_dev_disable_iopf(dev, mdev->domain);
+ mdev->domain = domain;
+
return 0;
}
@@ -229,6 +241,7 @@ static int mock_domain_set_dev_pasid_nop(struct iommu_domain *domain,
struct iommu_domain *old)
{
struct mock_dev *mdev = to_mock_dev(dev);
+ int rc;
/*
* Per the first attach with pasid 1024, set the
@@ -256,6 +269,12 @@ static int mock_domain_set_dev_pasid_nop(struct iommu_domain *domain,
}
}
+ rc = mock_dev_enable_iopf(dev, domain);
+ if (rc)
+ return rc;
+
+ mock_dev_disable_iopf(dev, old);
+
return 0;
}
@@ -610,22 +629,42 @@ static void mock_domain_page_response(struct device *dev, struct iopf_fault *evt
{
}
-static int mock_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
+static int mock_dev_enable_iopf(struct device *dev, struct iommu_domain *domain)
{
- if (feat != IOMMU_DEV_FEAT_IOPF || !mock_iommu_iopf_queue)
+ struct mock_dev *mdev = to_mock_dev(dev);
+ int ret;
+
+ if (!domain || !domain->iopf_handler)
+ return 0;
+
+ if (!mock_iommu_iopf_queue)
return -ENODEV;
- return iopf_queue_add_device(mock_iommu_iopf_queue, dev);
+ if (mdev->iopf_refcount) {
+ mdev->iopf_refcount++;
+ return 0;
+ }
+
+ ret = iopf_queue_add_device(mock_iommu_iopf_queue, dev);
+ if (ret)
+ return ret;
+
+ mdev->iopf_refcount = 1;
+
+ return 0;
}
-static int mock_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
+static void mock_dev_disable_iopf(struct device *dev, struct iommu_domain *domain)
{
- if (feat != IOMMU_DEV_FEAT_IOPF || !mock_iommu_iopf_queue)
- return -ENODEV;
+ struct mock_dev *mdev = to_mock_dev(dev);
- iopf_queue_remove_device(mock_iommu_iopf_queue, dev);
+ if (!domain || !domain->iopf_handler)
+ return;
- return 0;
+ if (--mdev->iopf_refcount)
+ return;
+
+ iopf_queue_remove_device(mock_iommu_iopf_queue, dev);
}
static void mock_viommu_destroy(struct iommufd_viommu *viommu)
@@ -770,8 +809,6 @@ static const struct iommu_ops mock_ops = {
.device_group = generic_device_group,
.probe_device = mock_probe_device,
.page_response = mock_domain_page_response,
- .dev_enable_feat = mock_dev_enable_feat,
- .dev_disable_feat = mock_dev_disable_feat,
.user_pasid_table = true,
.viommu_alloc = mock_viommu_alloc,
.default_domain_ops =
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index e424b279a8cd..90341b24a811 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -1090,7 +1090,8 @@ static int ipmmu_probe(struct platform_device *pdev)
if (mmu->features->has_cache_leaf_nodes && ipmmu_is_root(mmu))
return 0;
- ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL, dev_name(&pdev->dev));
+ ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL, "%s",
+ dev_name(&pdev->dev));
if (ret)
return ret;
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index df98d0c65f54..cb95fecf6016 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -1550,6 +1550,31 @@ static const struct mtk_iommu_plat_data mt6795_data = {
.larbid_remap = {{0}, {1}, {2}, {3}, {4}}, /* Linear mapping. */
};
+static const unsigned int mt8192_larb_region_msk[MT8192_MULTI_REGION_NR_MAX][MTK_LARB_NR_MAX] = {
+ [0] = {~0, ~0}, /* Region0: larb0/1 */
+ [1] = {0, 0, 0, 0, ~0, ~0, 0, ~0}, /* Region1: larb4/5/7 */
+ [2] = {0, 0, ~0, 0, 0, 0, 0, 0, /* Region2: larb2/9/11/13/14/16/17/18/19/20 */
+ 0, ~0, 0, ~0, 0, ~(u32)(BIT(9) | BIT(10)), ~(u32)(BIT(4) | BIT(5)), 0,
+ ~0, ~0, ~0, ~0, ~0},
+ [3] = {0},
+ [4] = {[13] = BIT(9) | BIT(10)}, /* larb13 port9/10 */
+ [5] = {[14] = BIT(4) | BIT(5)}, /* larb14 port4/5 */
+};
+
+static const struct mtk_iommu_plat_data mt6893_data = {
+ .m4u_plat = M4U_MT8192,
+ .flags = HAS_BCLK | OUT_ORDER_WR_EN | HAS_SUB_COMM_2BITS |
+ WR_THROT_EN | IOVA_34_EN | SHARE_PGTABLE | MTK_IOMMU_TYPE_MM,
+ .inv_sel_reg = REG_MMU_INV_SEL_GEN2,
+ .banks_num = 1,
+ .banks_enable = {true},
+ .iova_region = mt8192_multi_dom,
+ .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom),
+ .iova_region_larb_msk = mt8192_larb_region_msk,
+ .larbid_remap = {{0}, {1}, {4, 5}, {7}, {2}, {9, 11, 19, 20},
+ {0, 14, 16}, {0, 13, 18, 17}},
+};
+
static const struct mtk_iommu_plat_data mt8167_data = {
.m4u_plat = M4U_MT8167,
.flags = RESET_AXI | HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM,
@@ -1673,17 +1698,6 @@ static const struct mtk_iommu_plat_data mt8188_data_vpp = {
27, 28 /* ccu0 */, MTK_INVALID_LARBID}, {4, 6}},
};
-static const unsigned int mt8192_larb_region_msk[MT8192_MULTI_REGION_NR_MAX][MTK_LARB_NR_MAX] = {
- [0] = {~0, ~0}, /* Region0: larb0/1 */
- [1] = {0, 0, 0, 0, ~0, ~0, 0, ~0}, /* Region1: larb4/5/7 */
- [2] = {0, 0, ~0, 0, 0, 0, 0, 0, /* Region2: larb2/9/11/13/14/16/17/18/19/20 */
- 0, ~0, 0, ~0, 0, ~(u32)(BIT(9) | BIT(10)), ~(u32)(BIT(4) | BIT(5)), 0,
- ~0, ~0, ~0, ~0, ~0},
- [3] = {0},
- [4] = {[13] = BIT(9) | BIT(10)}, /* larb13 port9/10 */
- [5] = {[14] = BIT(4) | BIT(5)}, /* larb14 port4/5 */
-};
-
static const struct mtk_iommu_plat_data mt8192_data = {
.m4u_plat = M4U_MT8192,
.flags = HAS_BCLK | HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN |
@@ -1777,6 +1791,7 @@ static const struct of_device_id mtk_iommu_of_ids[] = {
{ .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
{ .compatible = "mediatek,mt6779-m4u", .data = &mt6779_data},
{ .compatible = "mediatek,mt6795-m4u", .data = &mt6795_data},
+ { .compatible = "mediatek,mt6893-iommu-mm", .data = &mt6893_data},
{ .compatible = "mediatek,mt8167-m4u", .data = &mt8167_data},
{ .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
{ .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
diff --git a/drivers/iommu/riscv/Makefile b/drivers/iommu/riscv/Makefile
index f54c9ed17d41..b5929f9f23e6 100644
--- a/drivers/iommu/riscv/Makefile
+++ b/drivers/iommu/riscv/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_RISCV_IOMMU) += iommu.o iommu-platform.o
+obj-y += iommu.o iommu-platform.o
obj-$(CONFIG_RISCV_IOMMU_PCI) += iommu-pci.o
diff --git a/drivers/iommu/riscv/iommu.c b/drivers/iommu/riscv/iommu.c
index 8f049d4a0e2c..bb57092ca901 100644
--- a/drivers/iommu/riscv/iommu.c
+++ b/drivers/iommu/riscv/iommu.c
@@ -48,14 +48,13 @@ static DEFINE_IDA(riscv_iommu_pscids);
/* Device resource-managed allocations */
struct riscv_iommu_devres {
void *addr;
- int order;
};
static void riscv_iommu_devres_pages_release(struct device *dev, void *res)
{
struct riscv_iommu_devres *devres = res;
- iommu_free_pages(devres->addr, devres->order);
+ iommu_free_pages(devres->addr);
}
static int riscv_iommu_devres_pages_match(struct device *dev, void *res, void *p)
@@ -66,13 +65,14 @@ static int riscv_iommu_devres_pages_match(struct device *dev, void *res, void *p
return devres->addr == target->addr;
}
-static void *riscv_iommu_get_pages(struct riscv_iommu_device *iommu, int order)
+static void *riscv_iommu_get_pages(struct riscv_iommu_device *iommu,
+ unsigned int size)
{
struct riscv_iommu_devres *devres;
void *addr;
- addr = iommu_alloc_pages_node(dev_to_node(iommu->dev),
- GFP_KERNEL_ACCOUNT, order);
+ addr = iommu_alloc_pages_node_sz(dev_to_node(iommu->dev),
+ GFP_KERNEL_ACCOUNT, size);
if (unlikely(!addr))
return NULL;
@@ -80,12 +80,11 @@ static void *riscv_iommu_get_pages(struct riscv_iommu_device *iommu, int order)
sizeof(struct riscv_iommu_devres), GFP_KERNEL);
if (unlikely(!devres)) {
- iommu_free_pages(addr, order);
+ iommu_free_pages(addr);
return NULL;
}
devres->addr = addr;
- devres->order = order;
devres_add(iommu->dev, devres);
@@ -163,9 +162,9 @@ static int riscv_iommu_queue_alloc(struct riscv_iommu_device *iommu,
} else {
do {
const size_t queue_size = entry_size << (logsz + 1);
- const int order = get_order(queue_size);
- queue->base = riscv_iommu_get_pages(iommu, order);
+ queue->base = riscv_iommu_get_pages(
+ iommu, max(queue_size, SZ_4K));
queue->phys = __pa(queue->base);
} while (!queue->base && logsz-- > 0);
}
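A worked sizing example for the retry loop above, assuming a hypothetical 16-byte queue entry: logsz = 11 requests 16 << 12 = 64 KiB; if that allocation fails, the loop retries at 32 KiB, then 16 KiB, and so on, with max(queue_size, SZ_4K) keeping every request at or above the allocator's 4 KiB minimum:

	const size_t entry_size = 16;			/* hypothetical */
	unsigned int logsz = 11;
	size_t queue_size = entry_size << (logsz + 1);	/* 64 KiB */
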
@@ -620,7 +619,7 @@ static struct riscv_iommu_dc *riscv_iommu_get_dc(struct riscv_iommu_device *iomm
break;
}
- ptr = riscv_iommu_get_pages(iommu, 0);
+ ptr = riscv_iommu_get_pages(iommu, SZ_4K);
if (!ptr)
return NULL;
@@ -700,7 +699,7 @@ static int riscv_iommu_iodir_alloc(struct riscv_iommu_device *iommu)
}
if (!iommu->ddt_root) {
- iommu->ddt_root = riscv_iommu_get_pages(iommu, 0);
+ iommu->ddt_root = riscv_iommu_get_pages(iommu, SZ_4K);
iommu->ddt_phys = __pa(iommu->ddt_root);
}
@@ -1087,7 +1086,8 @@ static void riscv_iommu_iotlb_sync(struct iommu_domain *iommu_domain,
#define _io_pte_entry(pn, prot) ((_PAGE_PFN_MASK & ((pn) << _PAGE_PFN_SHIFT)) | (prot))
static void riscv_iommu_pte_free(struct riscv_iommu_domain *domain,
- unsigned long pte, struct list_head *freelist)
+ unsigned long pte,
+ struct iommu_pages_list *freelist)
{
unsigned long *ptr;
int i;
@@ -1105,9 +1105,9 @@ static void riscv_iommu_pte_free(struct riscv_iommu_domain *domain,
}
if (freelist)
- list_add_tail(&virt_to_page(ptr)->lru, freelist);
+ iommu_pages_list_add(freelist, ptr);
else
- iommu_free_page(ptr);
+ iommu_free_pages(ptr);
}
static unsigned long *riscv_iommu_pte_alloc(struct riscv_iommu_domain *domain,
@@ -1144,13 +1144,14 @@ pte_retry:
* page table. This might race with other mappings, retry.
*/
if (_io_pte_none(pte)) {
- addr = iommu_alloc_page_node(domain->numa_node, gfp);
+ addr = iommu_alloc_pages_node_sz(domain->numa_node, gfp,
+ SZ_4K);
if (!addr)
return NULL;
old = pte;
pte = _io_pte_entry(virt_to_pfn(addr), _PAGE_TABLE);
if (cmpxchg_relaxed(ptr, old, pte) != old) {
- iommu_free_page(addr);
+ iommu_free_pages(addr);
goto pte_retry;
}
}
@@ -1194,7 +1195,7 @@ static int riscv_iommu_map_pages(struct iommu_domain *iommu_domain,
unsigned long *ptr;
unsigned long pte, old, pte_prot;
int rc = 0;
- LIST_HEAD(freelist);
+ struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist);
if (!(prot & IOMMU_WRITE))
pte_prot = _PAGE_BASE | _PAGE_READ;
@@ -1225,7 +1226,7 @@ static int riscv_iommu_map_pages(struct iommu_domain *iommu_domain,
*mapped = size;
- if (!list_empty(&freelist)) {
+ if (!iommu_pages_list_empty(&freelist)) {
/*
* In 1.0 spec version, the smallest scope we can use to
* invalidate all levels of page table (i.e. leaf and non-leaf)
@@ -1385,8 +1386,8 @@ static struct iommu_domain *riscv_iommu_alloc_paging_domain(struct device *dev)
domain->numa_node = dev_to_node(iommu->dev);
domain->amo_enabled = !!(iommu->caps & RISCV_IOMMU_CAPABILITIES_AMO_HWAD);
domain->pgd_mode = pgd_mode;
- domain->pgd_root = iommu_alloc_page_node(domain->numa_node,
- GFP_KERNEL_ACCOUNT);
+ domain->pgd_root = iommu_alloc_pages_node_sz(domain->numa_node,
+ GFP_KERNEL_ACCOUNT, SZ_4K);
if (!domain->pgd_root) {
kfree(domain);
return ERR_PTR(-ENOMEM);
@@ -1395,7 +1396,7 @@ static struct iommu_domain *riscv_iommu_alloc_paging_domain(struct device *dev)
domain->pscid = ida_alloc_range(&riscv_iommu_pscids, 1,
RISCV_IOMMU_MAX_PSCID, GFP_KERNEL);
if (domain->pscid < 0) {
- iommu_free_page(domain->pgd_root);
+ iommu_free_pages(domain->pgd_root);
kfree(domain);
return ERR_PTR(-ENOMEM);
}
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index af4cc91b2bbf..22f74ba33a0e 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -730,14 +730,15 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
if (rk_dte_is_pt_valid(dte))
goto done;
- page_table = iommu_alloc_page(GFP_ATOMIC | rk_ops->gfp_flags);
+ page_table = iommu_alloc_pages_sz(GFP_ATOMIC | rk_ops->gfp_flags,
+ SPAGE_SIZE);
if (!page_table)
return ERR_PTR(-ENOMEM);
pt_dma = dma_map_single(rk_domain->dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(rk_domain->dma_dev, pt_dma)) {
dev_err(rk_domain->dma_dev, "DMA mapping error while allocating page table\n");
- iommu_free_page(page_table);
+ iommu_free_pages(page_table);
return ERR_PTR(-ENOMEM);
}
@@ -1062,7 +1063,8 @@ static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
* Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
* Allocate one 4 KiB page for each table.
*/
- rk_domain->dt = iommu_alloc_page(GFP_KERNEL | rk_ops->gfp_flags);
+ rk_domain->dt = iommu_alloc_pages_sz(GFP_KERNEL | rk_ops->gfp_flags,
+ SPAGE_SIZE);
if (!rk_domain->dt)
goto err_free_domain;
@@ -1086,7 +1088,7 @@ static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
return &rk_domain->domain;
err_free_dt:
- iommu_free_page(rk_domain->dt);
+ iommu_free_pages(rk_domain->dt);
err_free_domain:
kfree(rk_domain);
@@ -1107,13 +1109,13 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)
u32 *page_table = phys_to_virt(pt_phys);
dma_unmap_single(rk_domain->dma_dev, pt_phys,
SPAGE_SIZE, DMA_TO_DEVICE);
- iommu_free_page(page_table);
+ iommu_free_pages(page_table);
}
}
dma_unmap_single(rk_domain->dma_dev, rk_domain->dt_dma,
SPAGE_SIZE, DMA_TO_DEVICE);
- iommu_free_page(rk_domain->dt);
+ iommu_free_pages(rk_domain->dt);
kfree(rk_domain);
}
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index e1c76e0f9c2b..433b59f43530 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -31,10 +31,21 @@ struct s390_domain {
unsigned long *dma_table;
spinlock_t list_lock;
struct rcu_head rcu;
+ u8 origin_type;
};
static struct iommu_domain blocking_domain;
+static inline unsigned int calc_rfx(dma_addr_t ptr)
+{
+ return ((unsigned long)ptr >> ZPCI_RF_SHIFT) & ZPCI_INDEX_MASK;
+}
+
+static inline unsigned int calc_rsx(dma_addr_t ptr)
+{
+ return ((unsigned long)ptr >> ZPCI_RS_SHIFT) & ZPCI_INDEX_MASK;
+}
+
static inline unsigned int calc_rtx(dma_addr_t ptr)
{
return ((unsigned long)ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK;
@@ -56,6 +67,20 @@ static inline void set_pt_pfaa(unsigned long *entry, phys_addr_t pfaa)
*entry |= (pfaa & ZPCI_PTE_ADDR_MASK);
}
+static inline void set_rf_rso(unsigned long *entry, phys_addr_t rso)
+{
+ *entry &= ZPCI_RTE_FLAG_MASK;
+ *entry |= (rso & ZPCI_RTE_ADDR_MASK);
+ *entry |= ZPCI_TABLE_TYPE_RFX;
+}
+
+static inline void set_rs_rto(unsigned long *entry, phys_addr_t rto)
+{
+ *entry &= ZPCI_RTE_FLAG_MASK;
+ *entry |= (rto & ZPCI_RTE_ADDR_MASK);
+ *entry |= ZPCI_TABLE_TYPE_RSX;
+}
+
static inline void set_rt_sto(unsigned long *entry, phys_addr_t sto)
{
*entry &= ZPCI_RTE_FLAG_MASK;
@@ -70,6 +95,22 @@ static inline void set_st_pto(unsigned long *entry, phys_addr_t pto)
*entry |= ZPCI_TABLE_TYPE_SX;
}
+static inline void validate_rf_entry(unsigned long *entry)
+{
+ *entry &= ~ZPCI_TABLE_VALID_MASK;
+ *entry &= ~ZPCI_TABLE_OFFSET_MASK;
+ *entry |= ZPCI_TABLE_VALID;
+ *entry |= ZPCI_TABLE_LEN_RFX;
+}
+
+static inline void validate_rs_entry(unsigned long *entry)
+{
+ *entry &= ~ZPCI_TABLE_VALID_MASK;
+ *entry &= ~ZPCI_TABLE_OFFSET_MASK;
+ *entry |= ZPCI_TABLE_VALID;
+ *entry |= ZPCI_TABLE_LEN_RSX;
+}
+
static inline void validate_rt_entry(unsigned long *entry)
{
*entry &= ~ZPCI_TABLE_VALID_MASK;
@@ -120,6 +161,22 @@ static inline int pt_entry_isvalid(unsigned long entry)
return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID;
}
+static inline unsigned long *get_rf_rso(unsigned long entry)
+{
+ if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RFX)
+ return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK);
+ else
+ return NULL;
+}
+
+static inline unsigned long *get_rs_rto(unsigned long entry)
+{
+ if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RSX)
+ return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK);
+ else
+ return NULL;
+}
+
static inline unsigned long *get_rt_sto(unsigned long entry)
{
if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
@@ -191,18 +248,59 @@ static void dma_free_seg_table(unsigned long entry)
dma_free_cpu_table(sto);
}
-static void dma_cleanup_tables(unsigned long *table)
+static void dma_free_rt_table(unsigned long entry)
{
+ unsigned long *rto = get_rs_rto(entry);
int rtx;
- if (!table)
+ for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
+ if (reg_entry_isvalid(rto[rtx]))
+ dma_free_seg_table(rto[rtx]);
+
+ dma_free_cpu_table(rto);
+}
+
+static void dma_free_rs_table(unsigned long entry)
+{
+ unsigned long *rso = get_rf_rso(entry);
+ int rsx;
+
+ for (rsx = 0; rsx < ZPCI_TABLE_ENTRIES; rsx++)
+ if (reg_entry_isvalid(rso[rsx]))
+ dma_free_rt_table(rso[rsx]);
+
+ dma_free_cpu_table(rso);
+}
+
+static void dma_cleanup_tables(struct s390_domain *domain)
+{
+ int rtx, rsx, rfx;
+
+ if (!domain->dma_table)
return;
- for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
- if (reg_entry_isvalid(table[rtx]))
- dma_free_seg_table(table[rtx]);
+ switch (domain->origin_type) {
+ case ZPCI_TABLE_TYPE_RFX:
+ for (rfx = 0; rfx < ZPCI_TABLE_ENTRIES; rfx++)
+ if (reg_entry_isvalid(domain->dma_table[rfx]))
+ dma_free_rs_table(domain->dma_table[rfx]);
+ break;
+ case ZPCI_TABLE_TYPE_RSX:
+ for (rsx = 0; rsx < ZPCI_TABLE_ENTRIES; rsx++)
+ if (reg_entry_isvalid(domain->dma_table[rsx]))
+ dma_free_rt_table(domain->dma_table[rsx]);
+ break;
+ case ZPCI_TABLE_TYPE_RTX:
+ for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
+ if (reg_entry_isvalid(domain->dma_table[rtx]))
+ dma_free_seg_table(domain->dma_table[rtx]);
+ break;
+ default:
+ WARN_ONCE(1, "Invalid IOMMU table (%x)\n", domain->origin_type);
+ return;
+ }
- dma_free_cpu_table(table);
+ dma_free_cpu_table(domain->dma_table);
}
static unsigned long *dma_alloc_page_table(gfp_t gfp)
@@ -218,6 +316,70 @@ static unsigned long *dma_alloc_page_table(gfp_t gfp)
return table;
}
+static unsigned long *dma_walk_rs_table(unsigned long *rso,
+ dma_addr_t dma_addr, gfp_t gfp)
+{
+ unsigned int rsx = calc_rsx(dma_addr);
+ unsigned long old_rse, rse;
+ unsigned long *rsep, *rto;
+
+ rsep = &rso[rsx];
+ rse = READ_ONCE(*rsep);
+ if (reg_entry_isvalid(rse)) {
+ rto = get_rs_rto(rse);
+ } else {
+ rto = dma_alloc_cpu_table(gfp);
+ if (!rto)
+ return NULL;
+
+ set_rs_rto(&rse, virt_to_phys(rto));
+ validate_rs_entry(&rse);
+ entry_clr_protected(&rse);
+
+ old_rse = cmpxchg(rsep, ZPCI_TABLE_INVALID, rse);
+ if (old_rse != ZPCI_TABLE_INVALID) {
+ /* Someone else was faster, use theirs */
+ dma_free_cpu_table(rto);
+ rto = get_rs_rto(old_rse);
+ }
+ }
+ return rto;
+}
+
+static unsigned long *dma_walk_rf_table(unsigned long *rfo,
+ dma_addr_t dma_addr, gfp_t gfp)
+{
+ unsigned int rfx = calc_rfx(dma_addr);
+ unsigned long old_rfe, rfe;
+ unsigned long *rfep, *rso;
+
+ rfep = &rfo[rfx];
+ rfe = READ_ONCE(*rfep);
+ if (reg_entry_isvalid(rfe)) {
+ rso = get_rf_rso(rfe);
+ } else {
+ rso = dma_alloc_cpu_table(gfp);
+ if (!rso)
+ return NULL;
+
+ set_rf_rso(&rfe, virt_to_phys(rso));
+ validate_rf_entry(&rfe);
+ entry_clr_protected(&rfe);
+
+ old_rfe = cmpxchg(rfep, ZPCI_TABLE_INVALID, rfe);
+ if (old_rfe != ZPCI_TABLE_INVALID) {
+ /* Someone else was faster, use theirs */
+ dma_free_cpu_table(rso);
+ rso = get_rf_rso(old_rfe);
+ }
+ }
+
+ if (!rso)
+ return NULL;
+
+ return dma_walk_rs_table(rso, dma_addr, gfp);
+}
+
static unsigned long *dma_get_seg_table_origin(unsigned long *rtep, gfp_t gfp)
{
unsigned long old_rte, rte;
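How an IOVA decomposes across the table levels these walkers traverse, as a sketch assuming the standard s390 DAT layout behind the ZPCI macros (2048-entry region and segment tables, 256-entry page tables, 4 KiB pages):

	unsigned int rfx, rsx, rtx, sx, px;

	rfx = (iova >> 53) & 0x7ff;	/* region-first index; covers 2^64  */
	rsx = (iova >> 42) & 0x7ff;	/* region-second index; covers 2^53 */
	rtx = (iova >> 31) & 0x7ff;	/* region-third index; covers 2^42  */
	sx  = (iova >> 20) & 0x7ff;	/* segment index */
	px  = (iova >> 12) & 0xff;	/* page index */
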
@@ -271,11 +433,31 @@ static unsigned long *dma_get_page_table_origin(unsigned long *step, gfp_t gfp)
return pto;
}
-static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr, gfp_t gfp)
+static unsigned long *dma_walk_region_tables(struct s390_domain *domain,
+ dma_addr_t dma_addr, gfp_t gfp)
+{
+ switch (domain->origin_type) {
+ case ZPCI_TABLE_TYPE_RFX:
+ return dma_walk_rf_table(domain->dma_table, dma_addr, gfp);
+ case ZPCI_TABLE_TYPE_RSX:
+ return dma_walk_rs_table(domain->dma_table, dma_addr, gfp);
+ case ZPCI_TABLE_TYPE_RTX:
+ return domain->dma_table;
+ default:
+ return NULL;
+ }
+}
+
+static unsigned long *dma_walk_cpu_trans(struct s390_domain *domain,
+ dma_addr_t dma_addr, gfp_t gfp)
{
- unsigned long *sto, *pto;
+ unsigned long *rto, *sto, *pto;
unsigned int rtx, sx, px;
+ rto = dma_walk_region_tables(domain, dma_addr, gfp);
+ if (!rto)
+ return NULL;
+
rtx = calc_rtx(dma_addr);
sto = dma_get_seg_table_origin(&rto[rtx], gfp);
if (!sto)
@@ -329,9 +511,25 @@ static bool s390_iommu_capable(struct device *dev, enum iommu_cap cap)
}
}
+static inline u64 max_tbl_size(struct s390_domain *domain)
+{
+ switch (domain->origin_type) {
+ case ZPCI_TABLE_TYPE_RTX:
+ return ZPCI_TABLE_SIZE_RT - 1;
+ case ZPCI_TABLE_TYPE_RSX:
+ return ZPCI_TABLE_SIZE_RS - 1;
+ case ZPCI_TABLE_TYPE_RFX:
+ return U64_MAX;
+ default:
+ return 0;
+ }
+}
+
static struct iommu_domain *s390_domain_alloc_paging(struct device *dev)
{
+ struct zpci_dev *zdev = to_zpci_dev(dev);
struct s390_domain *s390_domain;
+ u64 aperture_size;
s390_domain = kzalloc(sizeof(*s390_domain), GFP_KERNEL);
if (!s390_domain)
@@ -342,9 +540,26 @@ static struct iommu_domain *s390_domain_alloc_paging(struct device *dev)
kfree(s390_domain);
return NULL;
}
+
+ aperture_size = min(s390_iommu_aperture,
+ zdev->end_dma - zdev->start_dma + 1);
+ if (aperture_size <= (ZPCI_TABLE_SIZE_RT - zdev->start_dma)) {
+ s390_domain->origin_type = ZPCI_TABLE_TYPE_RTX;
+ } else if (aperture_size <= (ZPCI_TABLE_SIZE_RS - zdev->start_dma) &&
+ (zdev->dtsm & ZPCI_IOTA_DT_RS)) {
+ s390_domain->origin_type = ZPCI_TABLE_TYPE_RSX;
+ } else if (zdev->dtsm & ZPCI_IOTA_DT_RF) {
+ s390_domain->origin_type = ZPCI_TABLE_TYPE_RFX;
+ } else {
+ /* Assume RTX available */
+ s390_domain->origin_type = ZPCI_TABLE_TYPE_RTX;
+ aperture_size = ZPCI_TABLE_SIZE_RT - zdev->start_dma;
+ }
+ zdev->end_dma = zdev->start_dma + aperture_size - 1;
+
s390_domain->domain.geometry.force_aperture = true;
s390_domain->domain.geometry.aperture_start = 0;
- s390_domain->domain.geometry.aperture_end = ZPCI_TABLE_SIZE_RT - 1;
+ s390_domain->domain.geometry.aperture_end = max_tbl_size(s390_domain);
spin_lock_init(&s390_domain->list_lock);
INIT_LIST_HEAD_RCU(&s390_domain->devices);
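The table-type selection above written out as a standalone sketch, with the coverage limits made explicit (a region-third table covers 2^42 bytes, a region-second table 2^53; dtsm advertises which designation types the device supports):

	static u8 demo_pick_origin_type(u64 aperture, u64 start_dma, u8 dtsm)
	{
		if (aperture <= (1ULL << 42) - start_dma)
			return ZPCI_TABLE_TYPE_RTX;
		if (aperture <= (1ULL << 53) - start_dma && (dtsm & ZPCI_IOTA_DT_RS))
			return ZPCI_TABLE_TYPE_RSX;
		if (dtsm & ZPCI_IOTA_DT_RF)
			return ZPCI_TABLE_TYPE_RFX;
		return ZPCI_TABLE_TYPE_RTX;	/* fallback; aperture is clamped */
	}
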
@@ -356,7 +571,7 @@ static void s390_iommu_rcu_free_domain(struct rcu_head *head)
{
struct s390_domain *s390_domain = container_of(head, struct s390_domain, rcu);
- dma_cleanup_tables(s390_domain->dma_table);
+ dma_cleanup_tables(s390_domain);
kfree(s390_domain);
}
@@ -381,6 +596,21 @@ static void zdev_s390_domain_update(struct zpci_dev *zdev,
spin_unlock_irqrestore(&zdev->dom_lock, flags);
}
+static u64 get_iota_region_flag(struct s390_domain *domain)
+{
+ switch (domain->origin_type) {
+ case ZPCI_TABLE_TYPE_RTX:
+ return ZPCI_IOTA_RTTO_FLAG;
+ case ZPCI_TABLE_TYPE_RSX:
+ return ZPCI_IOTA_RSTO_FLAG;
+ case ZPCI_TABLE_TYPE_RFX:
+ return ZPCI_IOTA_RFTO_FLAG;
+ default:
+ WARN_ONCE(1, "Invalid IOMMU table (%x)\n", domain->origin_type);
+ return 0;
+ }
+}
+
static int s390_iommu_domain_reg_ioat(struct zpci_dev *zdev,
struct iommu_domain *domain, u8 *status)
{
@@ -399,7 +629,7 @@ static int s390_iommu_domain_reg_ioat(struct zpci_dev *zdev,
default:
s390_domain = to_s390_domain(domain);
iota = virt_to_phys(s390_domain->dma_table) |
- ZPCI_IOTA_RTTO_FLAG;
+ get_iota_region_flag(s390_domain);
rc = zpci_register_ioat(zdev, 0, zdev->start_dma,
zdev->end_dma, iota, status);
}
@@ -482,6 +712,8 @@ static void s390_iommu_get_resv_regions(struct device *dev,
{
struct zpci_dev *zdev = to_zpci_dev(dev);
struct iommu_resv_region *region;
+ u64 max_size, end_resv;
+ unsigned long flags;
if (zdev->start_dma) {
region = iommu_alloc_resv_region(0, zdev->start_dma, 0,
@@ -491,10 +723,21 @@ static void s390_iommu_get_resv_regions(struct device *dev,
list_add_tail(&region->list, list);
}
- if (zdev->end_dma < ZPCI_TABLE_SIZE_RT - 1) {
- region = iommu_alloc_resv_region(zdev->end_dma + 1,
- ZPCI_TABLE_SIZE_RT - zdev->end_dma - 1,
- 0, IOMMU_RESV_RESERVED, GFP_KERNEL);
+ spin_lock_irqsave(&zdev->dom_lock, flags);
+ if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED ||
+ zdev->s390_domain->type == IOMMU_DOMAIN_IDENTITY) {
+ spin_unlock_irqrestore(&zdev->dom_lock, flags);
+ return;
+ }
+
+ max_size = max_tbl_size(to_s390_domain(zdev->s390_domain));
+ spin_unlock_irqrestore(&zdev->dom_lock, flags);
+
+ if (zdev->end_dma < max_size) {
+ end_resv = max_size - zdev->end_dma;
+ region = iommu_alloc_resv_region(zdev->end_dma + 1, end_resv,
+ 0, IOMMU_RESV_RESERVED,
+ GFP_KERNEL);
if (!region)
return;
list_add_tail(&region->list, list);
@@ -510,13 +753,9 @@ static struct iommu_device *s390_iommu_probe_device(struct device *dev)
zdev = to_zpci_dev(dev);
- if (zdev->start_dma > zdev->end_dma ||
- zdev->start_dma > ZPCI_TABLE_SIZE_RT - 1)
+ if (zdev->start_dma > zdev->end_dma)
return ERR_PTR(-EINVAL);
- if (zdev->end_dma > ZPCI_TABLE_SIZE_RT - 1)
- zdev->end_dma = ZPCI_TABLE_SIZE_RT - 1;
-
if (zdev->tlb_refresh)
dev->iommu->shadow_on_flush = 1;
@@ -606,8 +845,7 @@ static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
int rc;
for (i = 0; i < nr_pages; i++) {
- entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr,
- gfp);
+ entry = dma_walk_cpu_trans(s390_domain, dma_addr, gfp);
if (unlikely(!entry)) {
rc = -ENOMEM;
goto undo_cpu_trans;
@@ -622,8 +860,7 @@ static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
undo_cpu_trans:
while (i-- > 0) {
dma_addr -= PAGE_SIZE;
- entry = dma_walk_cpu_trans(s390_domain->dma_table,
- dma_addr, gfp);
+ entry = dma_walk_cpu_trans(s390_domain, dma_addr, gfp);
if (!entry)
break;
dma_update_cpu_trans(entry, 0, ZPCI_PTE_INVALID);
@@ -640,8 +877,7 @@ static int s390_iommu_invalidate_trans(struct s390_domain *s390_domain,
int rc = 0;
for (i = 0; i < nr_pages; i++) {
- entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr,
- GFP_ATOMIC);
+ entry = dma_walk_cpu_trans(s390_domain, dma_addr, GFP_ATOMIC);
if (unlikely(!entry)) {
rc = -EINVAL;
break;
@@ -685,6 +921,51 @@ static int s390_iommu_map_pages(struct iommu_domain *domain,
return rc;
}
+static unsigned long *get_rso_from_iova(struct s390_domain *domain,
+ dma_addr_t iova)
+{
+ unsigned long *rfo;
+ unsigned long rfe;
+ unsigned int rfx;
+
+ switch (domain->origin_type) {
+ case ZPCI_TABLE_TYPE_RFX:
+ rfo = domain->dma_table;
+ rfx = calc_rfx(iova);
+ rfe = READ_ONCE(rfo[rfx]);
+ if (!reg_entry_isvalid(rfe))
+ return NULL;
+ return get_rf_rso(rfe);
+ case ZPCI_TABLE_TYPE_RSX:
+ return domain->dma_table;
+ default:
+ return NULL;
+ }
+}
+
+static unsigned long *get_rto_from_iova(struct s390_domain *domain,
+ dma_addr_t iova)
+{
+ unsigned long *rso;
+ unsigned long rse;
+ unsigned int rsx;
+
+ switch (domain->origin_type) {
+ case ZPCI_TABLE_TYPE_RFX:
+ case ZPCI_TABLE_TYPE_RSX:
+ rso = get_rso_from_iova(domain, iova);
+ if (!rso)
+ return NULL;
+ rsx = calc_rsx(iova);
+ rse = READ_ONCE(rso[rsx]);
+ if (!reg_entry_isvalid(rse))
+ return NULL;
+ return get_rs_rto(rse);
+ case ZPCI_TABLE_TYPE_RTX:
+ return domain->dma_table;
+ default:
+ return NULL;
+ }
+}
+
static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
@@ -698,10 +979,13 @@ static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain,
iova > domain->geometry.aperture_end)
return 0;
+ rto = get_rto_from_iova(s390_domain, iova);
+ if (!rto)
+ return 0;
+
rtx = calc_rtx(iova);
sx = calc_sx(iova);
px = calc_px(iova);
- rto = s390_domain->dma_table;
rte = READ_ONCE(rto[rtx]);
if (reg_entry_isvalid(rte)) {
@@ -756,7 +1040,6 @@ struct zpci_iommu_ctrs *zpci_get_iommu_ctrs(struct zpci_dev *zdev)
int zpci_init_iommu(struct zpci_dev *zdev)
{
- u64 aperture_size;
int rc = 0;
rc = iommu_device_sysfs_add(&zdev->iommu_dev, NULL, NULL,
@@ -774,12 +1057,6 @@ int zpci_init_iommu(struct zpci_dev *zdev)
if (rc)
goto out_sysfs;
- zdev->start_dma = PAGE_ALIGN(zdev->start_dma);
- aperture_size = min3(s390_iommu_aperture,
- ZPCI_TABLE_SIZE_RT - zdev->start_dma,
- zdev->end_dma - zdev->start_dma + 1);
- zdev->end_dma = zdev->start_dma + aperture_size - 1;
-
return 0;
out_sysfs:
diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
index 8d8f11854676..76c9620af4bb 100644
--- a/drivers/iommu/sun50i-iommu.c
+++ b/drivers/iommu/sun50i-iommu.c
@@ -690,8 +690,8 @@ sun50i_iommu_domain_alloc_paging(struct device *dev)
if (!sun50i_domain)
return NULL;
- sun50i_domain->dt = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32,
- get_order(DT_SIZE));
+ sun50i_domain->dt =
+ iommu_alloc_pages_sz(GFP_KERNEL | GFP_DMA32, DT_SIZE);
if (!sun50i_domain->dt)
goto err_free_domain;
@@ -713,7 +713,7 @@ static void sun50i_iommu_domain_free(struct iommu_domain *domain)
{
struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
- iommu_free_pages(sun50i_domain->dt, get_order(DT_SIZE));
+ iommu_free_pages(sun50i_domain->dt);
sun50i_domain->dt = NULL;
kfree(sun50i_domain);
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 69d353e1df84..61897d50162d 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -51,14 +51,17 @@ struct tegra_smmu {
struct iommu_device iommu; /* IOMMU Core code handle */
};
+struct tegra_pd;
+struct tegra_pt;
+
struct tegra_smmu_as {
struct iommu_domain domain;
struct tegra_smmu *smmu;
unsigned int use_count;
spinlock_t lock;
u32 *count;
- struct page **pts;
- struct page *pd;
+ struct tegra_pt **pts;
+ struct tegra_pd *pd;
dma_addr_t pd_dma;
unsigned id;
u32 attr;
@@ -155,6 +158,14 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
#define SMMU_PDE_ATTR (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
SMMU_PDE_NONSECURE)
+struct tegra_pd {
+ u32 val[SMMU_NUM_PDE];
+};
+
+struct tegra_pt {
+ u32 val[SMMU_NUM_PTE];
+};
+
static unsigned int iova_pd_index(unsigned long iova)
{
return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
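With the tables now typed, the size assumptions behind the dma_map_single() calls below become checkable at compile time; a hedged sketch, assuming SMMU_NUM_PDE and SMMU_NUM_PTE both describe one 4 KiB page of u32 entries:

	static_assert(sizeof(struct tegra_pd) == SMMU_SIZE_PD,
		      "page directory must be exactly one SMMU page");
	static_assert(sizeof(struct tegra_pt) == SMMU_SIZE_PT,
		      "page table must be exactly one SMMU page");
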
@@ -284,7 +295,7 @@ static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev)
as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;
- as->pd = __iommu_alloc_pages(GFP_KERNEL | __GFP_DMA, 0);
+ as->pd = iommu_alloc_pages_sz(GFP_KERNEL | __GFP_DMA, SMMU_SIZE_PD);
if (!as->pd) {
kfree(as);
return NULL;
@@ -292,7 +303,7 @@ static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev)
as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
if (!as->count) {
- __iommu_free_pages(as->pd, 0);
+ iommu_free_pages(as->pd);
kfree(as);
return NULL;
}
@@ -300,7 +311,7 @@ static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev)
as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
if (!as->pts) {
kfree(as->count);
- __iommu_free_pages(as->pd, 0);
+ iommu_free_pages(as->pd);
kfree(as);
return NULL;
}
@@ -417,8 +428,8 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
goto unlock;
}
- as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
- DMA_TO_DEVICE);
+ as->pd_dma =
+ dma_map_single(smmu->dev, as->pd, SMMU_SIZE_PD, DMA_TO_DEVICE);
if (dma_mapping_error(smmu->dev, as->pd_dma)) {
err = -ENOMEM;
goto unlock;
@@ -450,7 +461,7 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
return 0;
err_unmap:
- dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
+ dma_unmap_single(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
unlock:
mutex_unlock(&smmu->lock);
@@ -469,7 +480,7 @@ static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
tegra_smmu_free_asid(smmu, as->id);
- dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
+ dma_unmap_single(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
as->smmu = NULL;
@@ -548,11 +559,11 @@ static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
{
unsigned int pd_index = iova_pd_index(iova);
struct tegra_smmu *smmu = as->smmu;
- u32 *pd = page_address(as->pd);
+ struct tegra_pd *pd = as->pd;
unsigned long offset = pd_index * sizeof(*pd);
/* Set the page directory entry first */
- pd[pd_index] = value;
+ pd->val[pd_index] = value;
/* The flush the page directory entry from caches */
dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
@@ -564,11 +575,9 @@ static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
smmu_flush(smmu);
}
-static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
+static u32 *tegra_smmu_pte_offset(struct tegra_pt *pt, unsigned long iova)
{
- u32 *pt = page_address(pt_page);
-
- return pt + iova_pt_index(iova);
+ return &pt->val[iova_pt_index(iova)];
}
static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
@@ -576,21 +585,19 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
{
unsigned int pd_index = iova_pd_index(iova);
struct tegra_smmu *smmu = as->smmu;
- struct page *pt_page;
- u32 *pd;
+ struct tegra_pt *pt;
- pt_page = as->pts[pd_index];
- if (!pt_page)
+ pt = as->pts[pd_index];
+ if (!pt)
return NULL;
- pd = page_address(as->pd);
- *dmap = smmu_pde_to_dma(smmu, pd[pd_index]);
+ *dmap = smmu_pde_to_dma(smmu, as->pd->val[pd_index]);
- return tegra_smmu_pte_offset(pt_page, iova);
+ return tegra_smmu_pte_offset(pt, iova);
}
static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
- dma_addr_t *dmap, struct page *page)
+ dma_addr_t *dmap, struct tegra_pt *pt)
{
unsigned int pde = iova_pd_index(iova);
struct tegra_smmu *smmu = as->smmu;
@@ -598,30 +605,28 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
if (!as->pts[pde]) {
dma_addr_t dma;
- dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
- DMA_TO_DEVICE);
+ dma = dma_map_single(smmu->dev, pt, SMMU_SIZE_PT,
+ DMA_TO_DEVICE);
if (dma_mapping_error(smmu->dev, dma)) {
- __iommu_free_pages(page, 0);
+ iommu_free_pages(pt);
return NULL;
}
if (!smmu_dma_addr_valid(smmu, dma)) {
- dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
- DMA_TO_DEVICE);
- __iommu_free_pages(page, 0);
+ dma_unmap_single(smmu->dev, dma, SMMU_SIZE_PT,
+ DMA_TO_DEVICE);
+ iommu_free_pages(pt);
return NULL;
}
- as->pts[pde] = page;
+ as->pts[pde] = pt;
tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
SMMU_PDE_NEXT));
*dmap = dma;
} else {
- u32 *pd = page_address(as->pd);
-
- *dmap = smmu_pde_to_dma(smmu, pd[pde]);
+ *dmap = smmu_pde_to_dma(smmu, as->pd->val[pde]);
}
return tegra_smmu_pte_offset(as->pts[pde], iova);
@@ -637,7 +642,7 @@ static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{
unsigned int pde = iova_pd_index(iova);
- struct page *page = as->pts[pde];
+ struct tegra_pt *pt = as->pts[pde];
/*
* When no entries in this page table are used anymore, return the
@@ -645,13 +650,13 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
*/
if (--as->count[pde] == 0) {
struct tegra_smmu *smmu = as->smmu;
- u32 *pd = page_address(as->pd);
- dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);
+ dma_addr_t pte_dma = smmu_pde_to_dma(smmu, as->pd->val[pde]);
tegra_smmu_set_pde(as, iova, 0);
- dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
- __iommu_free_pages(page, 0);
+ dma_unmap_single(smmu->dev, pte_dma, SMMU_SIZE_PT,
+ DMA_TO_DEVICE);
+ iommu_free_pages(pt);
as->pts[pde] = NULL;
}
}
@@ -671,16 +676,16 @@ static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
smmu_flush(smmu);
}
-static struct page *as_get_pde_page(struct tegra_smmu_as *as,
- unsigned long iova, gfp_t gfp,
- unsigned long *flags)
+static struct tegra_pt *as_get_pde_page(struct tegra_smmu_as *as,
+ unsigned long iova, gfp_t gfp,
+ unsigned long *flags)
{
unsigned int pde = iova_pd_index(iova);
- struct page *page = as->pts[pde];
+ struct tegra_pt *pt = as->pts[pde];
/* at first check whether allocation needs to be done at all */
- if (page)
- return page;
+ if (pt)
+ return pt;
/*
* In order to prevent exhaustion of the atomic memory pool, we
@@ -690,7 +695,7 @@ static struct page *as_get_pde_page(struct tegra_smmu_as *as,
if (gfpflags_allow_blocking(gfp))
spin_unlock_irqrestore(&as->lock, *flags);
- page = __iommu_alloc_pages(gfp | __GFP_DMA, 0);
+ pt = iommu_alloc_pages_sz(gfp | __GFP_DMA, SMMU_SIZE_PT);
if (gfpflags_allow_blocking(gfp))
spin_lock_irqsave(&as->lock, *flags);
@@ -701,13 +706,13 @@ static struct page *as_get_pde_page(struct tegra_smmu_as *as,
* if allocation succeeded and the allocation failure isn't fatal.
*/
if (as->pts[pde]) {
- if (page)
- __iommu_free_pages(page, 0);
+ if (pt)
+ iommu_free_pages(pt);
- page = as->pts[pde];
+ pt = as->pts[pde];
}
- return page;
+ return pt;
}
static int
@@ -717,15 +722,15 @@ __tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
{
struct tegra_smmu_as *as = to_smmu_as(domain);
dma_addr_t pte_dma;
- struct page *page;
+ struct tegra_pt *pt;
u32 pte_attrs;
u32 *pte;
- page = as_get_pde_page(as, iova, gfp, flags);
- if (!page)
+ pt = as_get_pde_page(as, iova, gfp, flags);
+ if (!pt)
return -ENOMEM;
- pte = as_get_pte(as, iova, &pte_dma, page);
+ pte = as_get_pte(as, iova, &pte_dma, pt);
if (!pte)
return -ENOMEM;
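The tegra-smmu hunks above move from struct page bookkeeping to a typed page-table object allocated with iommu_alloc_pages_sz() and mapped with dma_map_single(). A minimal sketch of that allocate-then-map shape, using illustrative names (my_pt and alloc_and_map_pt are not from the patch):

	struct my_pt {
		u32 val[SMMU_SIZE_PT / sizeof(u32)];	/* illustrative layout */
	};

	static struct my_pt *alloc_and_map_pt(struct device *dev, gfp_t gfp,
					      dma_addr_t *dma)
	{
		struct my_pt *pt = iommu_alloc_pages_sz(gfp | __GFP_DMA, sizeof(*pt));

		if (!pt)
			return NULL;

		*dma = dma_map_single(dev, pt, sizeof(*pt), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *dma)) {
			iommu_free_pages(pt);	/* takes the virtual address, not a page */
			return NULL;
		}
		return pt;
	}

The typed struct also removes the page_address() round-trips: the PDE array is reached directly as as->pd->val[pde].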
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index b85ce6310ddb..ecd41fb03e5a 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -48,6 +48,7 @@ struct viommu_dev {
u64 pgsize_bitmap;
u32 first_domain;
u32 last_domain;
+ u32 identity_domain_id;
/* Supported MAP flags */
u32 map_flags;
u32 probe_size;
@@ -62,7 +63,6 @@ struct viommu_mapping {
struct viommu_domain {
struct iommu_domain domain;
struct viommu_dev *viommu;
- struct mutex mutex; /* protects viommu pointer */
unsigned int id;
u32 map_flags;
@@ -70,7 +70,6 @@ struct viommu_domain {
struct rb_root_cached mappings;
unsigned long nr_endpoints;
- bool bypass;
};
struct viommu_endpoint {
@@ -97,6 +96,8 @@ struct viommu_event {
};
};
+static struct viommu_domain viommu_identity_domain;
+
#define to_viommu_domain(domain) \
container_of(domain, struct viommu_domain, domain)
@@ -305,6 +306,22 @@ out_unlock:
return ret;
}
+static int viommu_send_attach_req(struct viommu_dev *viommu, struct device *dev,
+ struct virtio_iommu_req_attach *req)
+{
+ int ret;
+ unsigned int i;
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+
+ for (i = 0; i < fwspec->num_ids; i++) {
+ req->endpoint = cpu_to_le32(fwspec->ids[i]);
+ ret = viommu_send_req_sync(viommu, req, sizeof(*req));
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
/*
* viommu_add_mapping - add a mapping to the internal tree
*
@@ -637,71 +654,45 @@ static void viommu_event_handler(struct virtqueue *vq)
/* IOMMU API */
-static struct iommu_domain *viommu_domain_alloc(unsigned type)
+static struct iommu_domain *viommu_domain_alloc_paging(struct device *dev)
{
+ struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
+ struct viommu_dev *viommu = vdev->viommu;
+ unsigned long viommu_page_size;
struct viommu_domain *vdomain;
-
- if (type != IOMMU_DOMAIN_UNMANAGED &&
- type != IOMMU_DOMAIN_DMA &&
- type != IOMMU_DOMAIN_IDENTITY)
- return NULL;
-
- vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
- if (!vdomain)
- return NULL;
-
- mutex_init(&vdomain->mutex);
- spin_lock_init(&vdomain->mappings_lock);
- vdomain->mappings = RB_ROOT_CACHED;
-
- return &vdomain->domain;
-}
-
-static int viommu_domain_finalise(struct viommu_endpoint *vdev,
- struct iommu_domain *domain)
-{
int ret;
- unsigned long viommu_page_size;
- struct viommu_dev *viommu = vdev->viommu;
- struct viommu_domain *vdomain = to_viommu_domain(domain);
viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap);
if (viommu_page_size > PAGE_SIZE) {
dev_err(vdev->dev,
"granule 0x%lx larger than system page size 0x%lx\n",
viommu_page_size, PAGE_SIZE);
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
}
- ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
- viommu->last_domain, GFP_KERNEL);
- if (ret < 0)
- return ret;
+ vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
+ if (!vdomain)
+ return ERR_PTR(-ENOMEM);
- vdomain->id = (unsigned int)ret;
+ spin_lock_init(&vdomain->mappings_lock);
+ vdomain->mappings = RB_ROOT_CACHED;
- domain->pgsize_bitmap = viommu->pgsize_bitmap;
- domain->geometry = viommu->geometry;
+ ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
+ viommu->last_domain, GFP_KERNEL);
+ if (ret < 0) {
+ kfree(vdomain);
+ return ERR_PTR(ret);
+ }
- vdomain->map_flags = viommu->map_flags;
- vdomain->viommu = viommu;
+ vdomain->id = (unsigned int)ret;
- if (domain->type == IOMMU_DOMAIN_IDENTITY) {
- if (virtio_has_feature(viommu->vdev,
- VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
- vdomain->bypass = true;
- return 0;
- }
+ vdomain->domain.pgsize_bitmap = viommu->pgsize_bitmap;
+ vdomain->domain.geometry = viommu->geometry;
- ret = viommu_domain_map_identity(vdev, vdomain);
- if (ret) {
- ida_free(&viommu->domain_ids, vdomain->id);
- vdomain->viommu = NULL;
- return ret;
- }
- }
+ vdomain->map_flags = viommu->map_flags;
+ vdomain->viommu = viommu;
- return 0;
+ return &vdomain->domain;
}
static void viommu_domain_free(struct iommu_domain *domain)
@@ -717,29 +708,37 @@ static void viommu_domain_free(struct iommu_domain *domain)
kfree(vdomain);
}
+static struct iommu_domain *viommu_domain_alloc_identity(struct device *dev)
+{
+ struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
+ struct iommu_domain *domain;
+ int ret;
+
+ if (virtio_has_feature(vdev->viommu->vdev,
+ VIRTIO_IOMMU_F_BYPASS_CONFIG))
+ return &viommu_identity_domain.domain;
+
+ domain = viommu_domain_alloc_paging(dev);
+ if (IS_ERR(domain))
+ return domain;
+
+ ret = viommu_domain_map_identity(vdev, to_viommu_domain(domain));
+ if (ret) {
+ viommu_domain_free(domain);
+ return ERR_PTR(ret);
+ }
+ return domain;
+}
+
static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
- int i;
int ret = 0;
struct virtio_iommu_req_attach req;
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
struct viommu_domain *vdomain = to_viommu_domain(domain);
- mutex_lock(&vdomain->mutex);
- if (!vdomain->viommu) {
- /*
- * Properly initialize the domain now that we know which viommu
- * owns it.
- */
- ret = viommu_domain_finalise(vdev, domain);
- } else if (vdomain->viommu != vdev->viommu) {
- ret = -EINVAL;
- }
- mutex_unlock(&vdomain->mutex);
-
- if (ret)
- return ret;
+ if (vdomain->viommu != vdev->viommu)
+ return -EINVAL;
/*
* In the virtio-iommu device, when attaching the endpoint to a new
@@ -761,16 +760,9 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
.domain = cpu_to_le32(vdomain->id),
};
- if (vdomain->bypass)
- req.flags |= cpu_to_le32(VIRTIO_IOMMU_ATTACH_F_BYPASS);
-
- for (i = 0; i < fwspec->num_ids; i++) {
- req.endpoint = cpu_to_le32(fwspec->ids[i]);
-
- ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req));
- if (ret)
- return ret;
- }
+ ret = viommu_send_attach_req(vdomain->viommu, dev, &req);
+ if (ret)
+ return ret;
if (!vdomain->nr_endpoints) {
/*
@@ -788,6 +780,40 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
return 0;
}
+static int viommu_attach_identity_domain(struct iommu_domain *domain,
+ struct device *dev)
+{
+ int ret = 0;
+ struct virtio_iommu_req_attach req;
+ struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
+ struct viommu_domain *vdomain = to_viommu_domain(domain);
+
+ req = (struct virtio_iommu_req_attach) {
+ .head.type = VIRTIO_IOMMU_T_ATTACH,
+ .domain = cpu_to_le32(vdev->viommu->identity_domain_id),
+ .flags = cpu_to_le32(VIRTIO_IOMMU_ATTACH_F_BYPASS),
+ };
+
+ ret = viommu_send_attach_req(vdev->viommu, dev, &req);
+ if (ret)
+ return ret;
+
+ if (vdev->vdomain)
+ vdev->vdomain->nr_endpoints--;
+ vdomain->nr_endpoints++;
+ vdev->vdomain = vdomain;
+ return 0;
+}
+
+static struct viommu_domain viommu_identity_domain = {
+ .domain = {
+ .type = IOMMU_DOMAIN_IDENTITY,
+ .ops = &(const struct iommu_domain_ops) {
+ .attach_dev = viommu_attach_identity_domain,
+ },
+ },
+};
+
static void viommu_detach_dev(struct viommu_endpoint *vdev)
{
int i;
@@ -1062,7 +1088,8 @@ static bool viommu_capable(struct device *dev, enum iommu_cap cap)
static struct iommu_ops viommu_ops = {
.capable = viommu_capable,
- .domain_alloc = viommu_domain_alloc,
+ .domain_alloc_identity = viommu_domain_alloc_identity,
+ .domain_alloc_paging = viommu_domain_alloc_paging,
.probe_device = viommu_probe_device,
.release_device = viommu_release_device,
.device_group = viommu_device_group,
@@ -1184,6 +1211,12 @@ static int viommu_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO))
viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO;
+ /* Reserve an ID to use as the bypass domain */
+ if (virtio_has_feature(viommu->vdev, VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
+ viommu->identity_domain_id = viommu->first_domain;
+ viommu->first_domain++;
+ }
+
viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;
virtio_device_ready(vdev);
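With domain_alloc() split into domain_alloc_paging() and domain_alloc_identity(), a paging domain knows its viommu at allocation time, so the lazy viommu_domain_finalise() step and the mutex that guarded it go away; attach_dev only has to reject a mismatched viommu. The bypass case is served by the static viommu_identity_domain singleton with its own attach op. The general shape of such a static domain, with illustrative my_* names:

	static int my_attach_identity(struct iommu_domain *domain,
				      struct device *dev)
	{
		/* program bypass for the endpoint; no per-domain state needed */
		return 0;
	}

	static struct iommu_domain my_identity_domain = {
		.type = IOMMU_DOMAIN_IDENTITY,
		.ops = &(const struct iommu_domain_ops) {
			.attach_dev = my_attach_identity,
		},
	};

Reserving identity_domain_id in viommu_probe() keeps the ID allocator from ever handing the bypass domain's ID to a paging domain.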
diff --git a/drivers/leds/.kunitconfig b/drivers/leds/.kunitconfig
new file mode 100644
index 000000000000..5180f77910a1
--- /dev/null
+++ b/drivers/leds/.kunitconfig
@@ -0,0 +1,4 @@
+CONFIG_KUNIT=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_KUNIT_TEST=y
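With this fragment in place the suite can be exercised in the usual way, e.g. ./tools/testing/kunit/kunit.py run --kunitconfig=drivers/leds (assuming the standard kunit.py workflow, which picks up a .kunitconfig from the given directory).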
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index a104cbb0a001..6e3dce7e35a4 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -55,6 +55,13 @@ config LEDS_BRIGHTNESS_HW_CHANGED
See Documentation/ABI/testing/sysfs-class-led for details.
+config LEDS_KUNIT_TEST
+ tristate "KUnit tests for LEDs"
+ depends on KUNIT && LEDS_CLASS
+ default KUNIT_ALL_TESTS
+ help
+ Say Y here to enable KUnit testing for the LEDs framework.
+
comment "LED drivers"
config LEDS_88PM860X
@@ -735,7 +742,7 @@ config LEDS_NS2
tristate "LED support for Network Space v2 GPIO LEDs"
depends on LEDS_CLASS
depends on MACH_KIRKWOOD || MACH_ARMADA_370 || COMPILE_TEST
- default y
+ default y if MACH_KIRKWOOD || MACH_ARMADA_370
help
This option enables support for the dual-GPIO LEDs found on the
following LaCie/Seagate boards:
@@ -750,7 +757,7 @@ config LEDS_NETXBIG
depends on LEDS_CLASS
depends on MACH_KIRKWOOD || COMPILE_TEST
depends on OF_GPIO
- default y
+ default MACH_KIRKWOOD
help
This option enables support for LEDs found on the LaCie 2Big
and 5Big Network v2 boards. The LEDs are wired to a CPLD and are
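Both hunks lean on `default <expr>` being shorthand for `default y if <expr>`: the symbols now default on only when the matching board support is enabled, instead of whenever the entry is visible, which previously turned the drivers on by default in COMPILE_TEST-only configurations.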
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 2f170d69dcbf..9a0333ec1a86 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_LEDS_CLASS) += led-class.o
obj-$(CONFIG_LEDS_CLASS_FLASH) += led-class-flash.o
obj-$(CONFIG_LEDS_CLASS_MULTICOLOR) += led-class-multicolor.o
obj-$(CONFIG_LEDS_TRIGGERS) += led-triggers.o
+obj-$(CONFIG_LEDS_KUNIT_TEST) += led-test.o
# LED Platform Drivers (keep this sorted, M-| sort)
obj-$(CONFIG_LEDS_88PM860X) += leds-88pm860x.o
diff --git a/drivers/leds/blink/leds-lgm-sso.c b/drivers/leds/blink/leds-lgm-sso.c
index effaaaf302b5..c9027f9c4bb7 100644
--- a/drivers/leds/blink/leds-lgm-sso.c
+++ b/drivers/leds/blink/leds-lgm-sso.c
@@ -450,7 +450,7 @@ static int sso_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(reg_val & BIT(offset));
}
-static void sso_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int sso_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct sso_led_priv *priv = gpiochip_get_data(chip);
@@ -458,6 +458,8 @@ static void sso_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
if (!priv->gpio.freq)
regmap_update_bits(priv->mmap, SSO_CON0, SSO_CON0_SWU,
SSO_CON0_SWU);
+
+ return 0;
}
static int sso_gpio_gc_init(struct device *dev, struct sso_led_priv *priv)
@@ -469,7 +471,7 @@ static int sso_gpio_gc_init(struct device *dev, struct sso_led_priv *priv)
gc->get_direction = sso_gpio_get_dir;
gc->direction_output = sso_gpio_dir_out;
gc->get = sso_gpio_get;
- gc->set = sso_gpio_set;
+ gc->set_rv = sso_gpio_set;
gc->label = "lgm-sso";
gc->base = -1;
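This is the recurring gpiolib conversion in this series (pca9532 and pca955x below receive the same treatment): the legacy .set() callback returned void, while .set_rv() lets a driver report failures back to gpiolib. The pattern in isolation, with an illustrative my_gpio_set:

	static int my_gpio_set(struct gpio_chip *gc, unsigned int offset,
			       int value)
	{
		/* write the hardware; return 0 or a negative errno */
		return 0;
	}

	/* at chip setup, register the int-returning variant */
	gc->set_rv = my_gpio_set;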
diff --git a/drivers/leds/flash/Kconfig b/drivers/leds/flash/Kconfig
index f39f0bfe6eef..55ca663ca506 100644
--- a/drivers/leds/flash/Kconfig
+++ b/drivers/leds/flash/Kconfig
@@ -132,4 +132,15 @@ config LEDS_SY7802
This driver can be built as a module, it will be called "leds-sy7802".
+config LEDS_TPS6131X
+ tristate "LED support for TI TPS6131x flash LED driver"
+ depends on I2C && OF
+ depends on GPIOLIB
+ select REGMAP_I2C
+ help
+ This option enables support for the Texas Instruments TPS61310/TPS61311
+ flash LED driver.
+
+ This driver can be built as a module; if so, it will be called "leds-tps6131x".
+
endif # LEDS_CLASS_FLASH
diff --git a/drivers/leds/flash/Makefile b/drivers/leds/flash/Makefile
index 48860eeced79..712fb737a428 100644
--- a/drivers/leds/flash/Makefile
+++ b/drivers/leds/flash/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_LEDS_RT4505) += leds-rt4505.o
obj-$(CONFIG_LEDS_RT8515) += leds-rt8515.o
obj-$(CONFIG_LEDS_SGM3140) += leds-sgm3140.o
obj-$(CONFIG_LEDS_SY7802) += leds-sy7802.o
+obj-$(CONFIG_LEDS_TPS6131X) += leds-tps6131x.o
diff --git a/drivers/leds/flash/leds-tps6131x.c b/drivers/leds/flash/leds-tps6131x.c
new file mode 100644
index 000000000000..6f4d4fd55361
--- /dev/null
+++ b/drivers/leds/flash/leds-tps6131x.c
@@ -0,0 +1,815 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Texas Instruments TPS61310/TPS61311 flash LED driver with I2C interface
+ *
+ * Copyright 2025 Matthias Fend <matthias.fend@emfend.at>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/led-class-flash.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <media/v4l2-flash-led-class.h>
+
+#define TPS6131X_REG_0 0x00
+#define TPS6131X_REG_0_RESET BIT(7)
+#define TPS6131X_REG_0_DCLC13 GENMASK(5, 3)
+#define TPS6131X_REG_0_DCLC13_SHIFT 3
+#define TPS6131X_REG_0_DCLC2 GENMASK(2, 0)
+#define TPS6131X_REG_0_DCLC2_SHIFT 0
+
+#define TPS6131X_REG_1 0x01
+#define TPS6131X_REG_1_MODE GENMASK(7, 6)
+#define TPS6131X_REG_1_MODE_SHIFT 6
+#define TPS6131X_REG_1_FC2 GENMASK(5, 0)
+#define TPS6131X_REG_1_FC2_SHIFT 0
+
+#define TPS6131X_REG_2 0x02
+#define TPS6131X_REG_2_MODE GENMASK(7, 6)
+#define TPS6131X_REG_2_MODE_SHIFT 6
+#define TPS6131X_REG_2_ENVM BIT(5)
+#define TPS6131X_REG_2_FC13 GENMASK(4, 0)
+#define TPS6131X_REG_2_FC13_SHIFT 0
+
+#define TPS6131X_REG_3 0x03
+#define TPS6131X_REG_3_STIM GENMASK(7, 5)
+#define TPS6131X_REG_3_STIM_SHIFT 5
+#define TPS6131X_REG_3_HPFL BIT(4)
+#define TPS6131X_REG_3_SELSTIM_TO BIT(3)
+#define TPS6131X_REG_3_STT BIT(2)
+#define TPS6131X_REG_3_SFT BIT(1)
+#define TPS6131X_REG_3_TXMASK BIT(0)
+
+#define TPS6131X_REG_4 0x04
+#define TPS6131X_REG_4_PG BIT(7)
+#define TPS6131X_REG_4_HOTDIE_HI BIT(6)
+#define TPS6131X_REG_4_HOTDIE_LO BIT(5)
+#define TPS6131X_REG_4_ILIM BIT(4)
+#define TPS6131X_REG_4_INDC GENMASK(3, 0)
+#define TPS6131X_REG_4_INDC_SHIFT 0
+
+#define TPS6131X_REG_5 0x05
+#define TPS6131X_REG_5_SELFCAL BIT(7)
+#define TPS6131X_REG_5_ENPSM BIT(6)
+#define TPS6131X_REG_5_STSTRB1_DIR BIT(5)
+#define TPS6131X_REG_5_GPIO BIT(4)
+#define TPS6131X_REG_5_GPIOTYPE BIT(3)
+#define TPS6131X_REG_5_ENLED3 BIT(2)
+#define TPS6131X_REG_5_ENLED2 BIT(1)
+#define TPS6131X_REG_5_ENLED1 BIT(0)
+
+#define TPS6131X_REG_6 0x06
+#define TPS6131X_REG_6_ENTS BIT(7)
+#define TPS6131X_REG_6_LEDHOT BIT(6)
+#define TPS6131X_REG_6_LEDWARN BIT(5)
+#define TPS6131X_REG_6_LEDHDR BIT(4)
+#define TPS6131X_REG_6_OV GENMASK(3, 0)
+#define TPS6131X_REG_6_OV_SHIFT 0
+
+#define TPS6131X_REG_7 0x07
+#define TPS6131X_REG_7_ENBATMON BIT(7)
+#define TPS6131X_REG_7_BATDROOP GENMASK(6, 4)
+#define TPS6131X_REG_7_BATDROOP_SHIFT 4
+#define TPS6131X_REG_7_REVID GENMASK(2, 0)
+#define TPS6131X_REG_7_REVID_SHIFT 0
+
+#define TPS6131X_MAX_CHANNELS 3
+
+#define TPS6131X_FLASH_MAX_I_CHAN13_MA 400
+#define TPS6131X_FLASH_MAX_I_CHAN2_MA 800
+#define TPS6131X_FLASH_STEP_I_MA 25
+
+#define TPS6131X_TORCH_MAX_I_CHAN13_MA 175
+#define TPS6131X_TORCH_MAX_I_CHAN2_MA 175
+#define TPS6131X_TORCH_STEP_I_MA 25
+
+/* The torch watchdog timer must be refreshed within an interval of 13 seconds. */
+#define TPS6131X_TORCH_REFRESH_INTERVAL_JIFFIES msecs_to_jiffies(10000)
+
+#define UA_TO_MA(UA) ((UA) / 1000)
+
+enum tps6131x_mode {
+ TPS6131X_MODE_SHUTDOWN = 0x0,
+ TPS6131X_MODE_TORCH = 0x1,
+ TPS6131X_MODE_FLASH = 0x2,
+};
+
+struct tps6131x {
+ struct device *dev;
+ struct regmap *regmap;
+ struct gpio_desc *reset_gpio;
+ /*
+ * Registers 0, 1, 2, and 3 control parts of the controller that are not completely
+ * independent of each other. Since some operations require the registers to be written in
+ * a specific order to avoid unwanted side effects, they are synchronized with a lock.
+ */
+ struct mutex lock; /* Hardware access lock for register 0, 1, 2 and 3 */
+ struct delayed_work torch_refresh_work;
+ bool valley_current_limit;
+ bool chan1_en;
+ bool chan2_en;
+ bool chan3_en;
+ struct fwnode_handle *led_node;
+ u32 max_flash_current_ma;
+ u32 step_flash_current_ma;
+ u32 max_torch_current_ma;
+ u32 step_torch_current_ma;
+ u32 max_timeout_us;
+ struct led_classdev_flash fled_cdev;
+ struct v4l2_flash *v4l2_flash;
+};
+
+static struct tps6131x *fled_cdev_to_tps6131x(struct led_classdev_flash *fled_cdev)
+{
+ return container_of(fled_cdev, struct tps6131x, fled_cdev);
+}
+
+/*
+ * Register contents after a power on/reset. These values cannot be changed.
+ */
+
+#define TPS6131X_DCLC2_50MA 2
+#define TPS6131X_DCLC13_25MA 1
+#define TPS6131X_FC2_400MA 16
+#define TPS6131X_FC13_200MA 8
+#define TPS6131X_STIM_0_579MS_1_37MS 6
+#define TPS6131X_SELSTIM_RANGE0 0
+#define TPS6131X_INDC_OFF 0
+#define TPS6131X_OV_4950MV 9
+#define TPS6131X_BATDROOP_150MV 4
+
+static const struct reg_default tps6131x_regmap_defaults[] = {
+ { TPS6131X_REG_0, (TPS6131X_DCLC13_25MA << TPS6131X_REG_0_DCLC13_SHIFT) |
+ (TPS6131X_DCLC2_50MA << TPS6131X_REG_0_DCLC2_SHIFT) },
+ { TPS6131X_REG_1, (TPS6131X_MODE_SHUTDOWN << TPS6131X_REG_1_MODE_SHIFT) |
+ (TPS6131X_FC2_400MA << TPS6131X_REG_1_FC2_SHIFT) },
+ { TPS6131X_REG_2, (TPS6131X_MODE_SHUTDOWN << TPS6131X_REG_2_MODE_SHIFT) |
+ (TPS6131X_FC13_200MA << TPS6131X_REG_2_FC13_SHIFT) },
+ { TPS6131X_REG_3, (TPS6131X_STIM_0_579MS_1_37MS << TPS6131X_REG_3_STIM_SHIFT) |
+ (TPS6131X_SELSTIM_RANGE0 << TPS6131X_REG_3_SELSTIM_TO) |
+ TPS6131X_REG_3_TXMASK },
+ { TPS6131X_REG_4, (TPS6131X_INDC_OFF << TPS6131X_REG_4_INDC_SHIFT) },
+ { TPS6131X_REG_5, TPS6131X_REG_5_ENPSM | TPS6131X_REG_5_STSTRB1_DIR |
+ TPS6131X_REG_5_GPIOTYPE | TPS6131X_REG_5_ENLED2 },
+ { TPS6131X_REG_6, (TPS6131X_OV_4950MV << TPS6131X_REG_6_OV_SHIFT) },
+ { TPS6131X_REG_7, (TPS6131X_BATDROOP_150MV << TPS6131X_REG_7_BATDROOP_SHIFT) },
+};
+
+/*
+ * These registers contain flags that are reset when read.
+ */
+static bool tps6131x_regmap_precious(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TPS6131X_REG_3:
+ case TPS6131X_REG_4:
+ case TPS6131X_REG_6:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config tps6131x_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = TPS6131X_REG_7,
+ .reg_defaults = tps6131x_regmap_defaults,
+ .num_reg_defaults = ARRAY_SIZE(tps6131x_regmap_defaults),
+ .cache_type = REGCACHE_FLAT,
+ .precious_reg = &tps6131x_regmap_precious,
+};
+
+struct tps6131x_timer_config {
+ u8 val;
+ u8 range;
+ u32 time_us;
+};
+
+static const struct tps6131x_timer_config tps6131x_timer_configs[] = {
+ { .val = 0, .range = 1, .time_us = 5300 },
+ { .val = 1, .range = 1, .time_us = 10700 },
+ { .val = 2, .range = 1, .time_us = 16000 },
+ { .val = 3, .range = 1, .time_us = 21300 },
+ { .val = 4, .range = 1, .time_us = 26600 },
+ { .val = 5, .range = 1, .time_us = 32000 },
+ { .val = 6, .range = 1, .time_us = 37300 },
+ { .val = 0, .range = 0, .time_us = 68200 },
+ { .val = 7, .range = 1, .time_us = 71500 },
+ { .val = 1, .range = 0, .time_us = 102200 },
+ { .val = 2, .range = 0, .time_us = 136300 },
+ { .val = 3, .range = 0, .time_us = 170400 },
+ { .val = 4, .range = 0, .time_us = 204500 },
+ { .val = 5, .range = 0, .time_us = 340800 },
+ { .val = 6, .range = 0, .time_us = 579300 },
+ { .val = 7, .range = 0, .time_us = 852000 },
+};
+
+static const struct tps6131x_timer_config *tps6131x_find_closest_timer_config(u32 timeout_us)
+{
+ const struct tps6131x_timer_config *timer_config = &tps6131x_timer_configs[0];
+ u32 diff, min_diff = U32_MAX;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tps6131x_timer_configs); i++) {
+ diff = abs(tps6131x_timer_configs[i].time_us - timeout_us);
+ if (diff < min_diff) {
+ timer_config = &tps6131x_timer_configs[i];
+ min_diff = diff;
+ if (!min_diff)
+ break;
+ }
+ }
+
+ return timer_config;
+}
+
+static int tps6131x_reset_chip(struct tps6131x *tps6131x)
+{
+ int ret;
+
+ if (tps6131x->reset_gpio) {
+ gpiod_set_value_cansleep(tps6131x->reset_gpio, 1);
+ fsleep(10);
+ gpiod_set_value_cansleep(tps6131x->reset_gpio, 0);
+ fsleep(100);
+ } else {
+ ret = regmap_update_bits(tps6131x->regmap, TPS6131X_REG_0, TPS6131X_REG_0_RESET,
+ TPS6131X_REG_0_RESET);
+ if (ret)
+ return ret;
+
+ fsleep(100);
+
+ ret = regmap_update_bits(tps6131x->regmap, TPS6131X_REG_0, TPS6131X_REG_0_RESET, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tps6131x_init_chip(struct tps6131x *tps6131x)
+{
+ u32 val;
+ int ret;
+
+ val = tps6131x->valley_current_limit ? TPS6131X_REG_4_ILIM : 0;
+
+ ret = regmap_write(tps6131x->regmap, TPS6131X_REG_4, val);
+ if (ret)
+ return ret;
+
+ val = TPS6131X_REG_5_ENPSM | TPS6131X_REG_5_STSTRB1_DIR | TPS6131X_REG_5_GPIOTYPE;
+
+ if (tps6131x->chan1_en)
+ val |= TPS6131X_REG_5_ENLED1;
+
+ if (tps6131x->chan2_en)
+ val |= TPS6131X_REG_5_ENLED2;
+
+ if (tps6131x->chan3_en)
+ val |= TPS6131X_REG_5_ENLED3;
+
+ ret = regmap_write(tps6131x->regmap, TPS6131X_REG_5, val);
+ if (ret)
+ return ret;
+
+ val = TPS6131X_REG_6_ENTS;
+
+ ret = regmap_write(tps6131x->regmap, TPS6131X_REG_6, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int tps6131x_set_mode(struct tps6131x *tps6131x, enum tps6131x_mode mode, bool force)
+{
+ u8 val = mode << TPS6131X_REG_1_MODE_SHIFT;
+
+ return regmap_update_bits_base(tps6131x->regmap, TPS6131X_REG_1, TPS6131X_REG_1_MODE, val,
+ NULL, false, force);
+}
+
+static void tps6131x_torch_refresh_handler(struct work_struct *work)
+{
+ struct tps6131x *tps6131x = container_of(work, struct tps6131x, torch_refresh_work.work);
+ int ret;
+
+ guard(mutex)(&tps6131x->lock);
+
+ ret = tps6131x_set_mode(tps6131x, TPS6131X_MODE_TORCH, true);
+ if (ret < 0) {
+ dev_err(tps6131x->dev, "Failed to refresh torch watchdog timer\n");
+ return;
+ }
+
+ schedule_delayed_work(&tps6131x->torch_refresh_work,
+ TPS6131X_TORCH_REFRESH_INTERVAL_JIFFIES);
+}
+
+static int tps6131x_brightness_set(struct led_classdev *cdev, enum led_brightness brightness)
+{
+ struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(cdev);
+ struct tps6131x *tps6131x = fled_cdev_to_tps6131x(fled_cdev);
+ u32 num_chans, steps_chan13, steps_chan2, steps_remaining;
+ u8 reg0;
+ int ret;
+
+ cancel_delayed_work_sync(&tps6131x->torch_refresh_work);
+
+ /*
+ * The brightness parameter uses the number of current steps as the unit (not the current
+ * value itself). Since the reported step size can vary depending on the configuration,
+ * this value must be converted into actual register steps.
+ */
+ steps_remaining = (brightness * tps6131x->step_torch_current_ma) / TPS6131X_TORCH_STEP_I_MA;
+
+ num_chans = tps6131x->chan1_en + tps6131x->chan2_en + tps6131x->chan3_en;
+
+ /*
+ * The currents are distributed as evenly as possible across the activated channels.
+ * Since channels 1 and 3 share the same register setting, they always use the same current
+ * value. Channel 2 supports higher currents and thus takes over the remaining additional
+ * portion that cannot be covered by the other channels.
+ */
+ steps_chan13 = min_t(u32, steps_remaining / num_chans,
+ TPS6131X_TORCH_MAX_I_CHAN13_MA / TPS6131X_TORCH_STEP_I_MA);
+ if (tps6131x->chan1_en)
+ steps_remaining -= steps_chan13;
+ if (tps6131x->chan3_en)
+ steps_remaining -= steps_chan13;
+
+ steps_chan2 = min_t(u32, steps_remaining,
+ TPS6131X_TORCH_MAX_I_CHAN2_MA / TPS6131X_TORCH_STEP_I_MA);
+
+ guard(mutex)(&tps6131x->lock);
+
+ reg0 = (steps_chan13 << TPS6131X_REG_0_DCLC13_SHIFT) |
+ (steps_chan2 << TPS6131X_REG_0_DCLC2_SHIFT);
+ ret = regmap_update_bits(tps6131x->regmap, TPS6131X_REG_0,
+ TPS6131X_REG_0_DCLC13 | TPS6131X_REG_0_DCLC2, reg0);
+ if (ret < 0)
+ return ret;
+
+ ret = tps6131x_set_mode(tps6131x, brightness ? TPS6131X_MODE_TORCH : TPS6131X_MODE_SHUTDOWN,
+ true);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * In order to use both the flash and the video light functions purely via the I2C
+ * interface, STRB1 must be low. If STRB1 is low, then the video light watchdog timer
+ * is also active, which puts the device into the shutdown state after around 13 seconds.
+ * To prevent this, the mode must be refreshed within the watchdog timeout.
+ */
+ if (brightness)
+ schedule_delayed_work(&tps6131x->torch_refresh_work,
+ TPS6131X_TORCH_REFRESH_INTERVAL_JIFFIES);
+
+ return 0;
+}
+
+static int tps6131x_strobe_set(struct led_classdev_flash *fled_cdev, bool state)
+{
+ struct tps6131x *tps6131x = fled_cdev_to_tps6131x(fled_cdev);
+ int ret;
+
+ guard(mutex)(&tps6131x->lock);
+
+ ret = tps6131x_set_mode(tps6131x, state ? TPS6131X_MODE_FLASH : TPS6131X_MODE_SHUTDOWN,
+ true);
+ if (ret < 0)
+ return ret;
+
+ if (state) {
+ ret = regmap_update_bits_base(tps6131x->regmap, TPS6131X_REG_3, TPS6131X_REG_3_SFT,
+ TPS6131X_REG_3_SFT, NULL, false, true);
+ if (ret)
+ return ret;
+ }
+
+ ret = regmap_update_bits_base(tps6131x->regmap, TPS6131X_REG_3, TPS6131X_REG_3_SFT, 0, NULL,
+ false, true);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int tps6131x_flash_brightness_set(struct led_classdev_flash *fled_cdev, u32 brightness)
+{
+ struct tps6131x *tps6131x = fled_cdev_to_tps6131x(fled_cdev);
+ u32 num_chans;
+ u32 steps_chan13, steps_chan2;
+ u32 steps_remaining;
+ int ret;
+
+ steps_remaining = brightness / TPS6131X_FLASH_STEP_I_MA;
+ num_chans = tps6131x->chan1_en + tps6131x->chan2_en + tps6131x->chan3_en;
+ steps_chan13 = min_t(u32, steps_remaining / num_chans,
+ TPS6131X_FLASH_MAX_I_CHAN13_MA / TPS6131X_FLASH_STEP_I_MA);
+ if (tps6131x->chan1_en)
+ steps_remaining -= steps_chan13;
+ if (tps6131x->chan3_en)
+ steps_remaining -= steps_chan13;
+ steps_chan2 = min_t(u32, steps_remaining,
+ TPS6131X_FLASH_MAX_I_CHAN2_MA / TPS6131X_FLASH_STEP_I_MA);
+
+ guard(mutex)(&tps6131x->lock);
+
+ ret = regmap_update_bits(tps6131x->regmap, TPS6131X_REG_2, TPS6131X_REG_2_FC13,
+ steps_chan13 << TPS6131X_REG_2_FC13_SHIFT);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(tps6131x->regmap, TPS6131X_REG_1, TPS6131X_REG_1_FC2,
+ steps_chan2 << TPS6131X_REG_1_FC2_SHIFT);
+ if (ret < 0)
+ return ret;
+
+ fled_cdev->brightness.val = brightness;
+
+ return 0;
+}
+
+static int tps6131x_flash_timeout_set(struct led_classdev_flash *fled_cdev, u32 timeout_us)
+{
+ const struct tps6131x_timer_config *timer_config;
+ struct tps6131x *tps6131x = fled_cdev_to_tps6131x(fled_cdev);
+ u8 reg3;
+ int ret;
+
+ guard(mutex)(&tps6131x->lock);
+
+ timer_config = tps6131x_find_closest_timer_config(timeout_us);
+
+ reg3 = timer_config->val << TPS6131X_REG_3_STIM_SHIFT;
+ if (timer_config->range)
+ reg3 |= TPS6131X_REG_3_SELSTIM_TO;
+
+ ret = regmap_update_bits(tps6131x->regmap, TPS6131X_REG_3,
+ TPS6131X_REG_3_STIM | TPS6131X_REG_3_SELSTIM_TO, reg3);
+ if (ret < 0)
+ return ret;
+
+ fled_cdev->timeout.val = timer_config->time_us;
+
+ return 0;
+}
+
+static int tps6131x_strobe_get(struct led_classdev_flash *fled_cdev, bool *state)
+{
+ struct tps6131x *tps6131x = fled_cdev_to_tps6131x(fled_cdev);
+ unsigned int reg3;
+ int ret;
+
+ ret = regmap_read_bypassed(tps6131x->regmap, TPS6131X_REG_3, &reg3);
+ if (ret)
+ return ret;
+
+ *state = !!(reg3 & TPS6131X_REG_3_SFT);
+
+ return 0;
+}
+
+static int tps6131x_flash_fault_get(struct led_classdev_flash *fled_cdev, u32 *fault)
+{
+ struct tps6131x *tps6131x = fled_cdev_to_tps6131x(fled_cdev);
+ unsigned int reg3, reg4, reg6;
+ int ret;
+
+ *fault = 0;
+
+ ret = regmap_read_bypassed(tps6131x->regmap, TPS6131X_REG_3, &reg3);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read_bypassed(tps6131x->regmap, TPS6131X_REG_4, &reg4);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read_bypassed(tps6131x->regmap, TPS6131X_REG_6, &reg6);
+ if (ret < 0)
+ return ret;
+
+ if (reg3 & TPS6131X_REG_3_HPFL)
+ *fault |= LED_FAULT_SHORT_CIRCUIT;
+
+ if (reg3 & TPS6131X_REG_3_SELSTIM_TO)
+ *fault |= LED_FAULT_TIMEOUT;
+
+ if (reg4 & TPS6131X_REG_4_HOTDIE_HI)
+ *fault |= LED_FAULT_OVER_TEMPERATURE;
+
+ if (reg6 & (TPS6131X_REG_6_LEDHOT | TPS6131X_REG_6_LEDWARN))
+ *fault |= LED_FAULT_LED_OVER_TEMPERATURE;
+
+ if (!(reg6 & TPS6131X_REG_6_LEDHDR))
+ *fault |= LED_FAULT_UNDER_VOLTAGE;
+
+ if (reg6 & TPS6131X_REG_6_LEDHOT) {
+ ret = regmap_update_bits_base(tps6131x->regmap, TPS6131X_REG_6,
+ TPS6131X_REG_6_LEDHOT, 0, NULL, false, true);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct led_flash_ops flash_ops = {
+ .flash_brightness_set = tps6131x_flash_brightness_set,
+ .strobe_set = tps6131x_strobe_set,
+ .strobe_get = tps6131x_strobe_get,
+ .timeout_set = tps6131x_flash_timeout_set,
+ .fault_get = tps6131x_flash_fault_get,
+};
+
+static int tps6131x_parse_node(struct tps6131x *tps6131x)
+{
+ const struct tps6131x_timer_config *timer_config;
+ struct device *dev = tps6131x->dev;
+ u32 channels[TPS6131X_MAX_CHANNELS];
+ u32 current_step_multiplier;
+ u32 current_ua;
+ u32 max_current_flash_ma, max_current_torch_ma;
+ u32 timeout_us;
+ int num_channels;
+ int i;
+ int ret;
+
+ tps6131x->valley_current_limit = device_property_read_bool(dev, "ti,valley-current-limit");
+
+ tps6131x->led_node = fwnode_get_next_available_child_node(dev->fwnode, NULL);
+ if (!tps6131x->led_node) {
+ dev_err(dev, "Missing LED node\n");
+ return -EINVAL;
+ }
+
+ num_channels = fwnode_property_count_u32(tps6131x->led_node, "led-sources");
+ if (num_channels <= 0) {
+ dev_err(dev, "Failed to read led-sources property\n");
+ return -EINVAL;
+ }
+
+ if (num_channels > TPS6131X_MAX_CHANNELS) {
+ dev_err(dev, "led-sources count %u exceeds maximum channel count %u\n",
+ num_channels, TPS6131X_MAX_CHANNELS);
+ return -EINVAL;
+ }
+
+ ret = fwnode_property_read_u32_array(tps6131x->led_node, "led-sources", channels,
+ num_channels);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read led-sources property\n");
+ return ret;
+ }
+
+ max_current_flash_ma = 0;
+ max_current_torch_ma = 0;
+ for (i = 0; i < num_channels; i++) {
+ switch (channels[i]) {
+ case 1:
+ tps6131x->chan1_en = true;
+ max_current_flash_ma += TPS6131X_FLASH_MAX_I_CHAN13_MA;
+ max_current_torch_ma += TPS6131X_TORCH_MAX_I_CHAN13_MA;
+ break;
+ case 2:
+ tps6131x->chan2_en = true;
+ max_current_flash_ma += TPS6131X_FLASH_MAX_I_CHAN2_MA;
+ max_current_torch_ma += TPS6131X_TORCH_MAX_I_CHAN2_MA;
+ break;
+ case 3:
+ tps6131x->chan3_en = true;
+ max_current_flash_ma += TPS6131X_FLASH_MAX_I_CHAN13_MA;
+ max_current_torch_ma += TPS6131X_TORCH_MAX_I_CHAN13_MA;
+ break;
+ default:
+ dev_err(dev, "led-source out of range [1-3]\n");
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * If only channels 1 and 3 are used, the step size is doubled because the two channels
+ * share the same current control register.
+ */
+ current_step_multiplier =
+ (tps6131x->chan1_en && tps6131x->chan3_en && !tps6131x->chan2_en) ? 2 : 1;
+ tps6131x->step_flash_current_ma = current_step_multiplier * TPS6131X_FLASH_STEP_I_MA;
+ tps6131x->step_torch_current_ma = current_step_multiplier * TPS6131X_TORCH_STEP_I_MA;
+
+ ret = fwnode_property_read_u32(tps6131x->led_node, "led-max-microamp", &current_ua);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read led-max-microamp property\n");
+ return ret;
+ }
+
+ tps6131x->max_torch_current_ma = UA_TO_MA(current_ua);
+
+ if (!tps6131x->max_torch_current_ma ||
+ tps6131x->max_torch_current_ma > max_current_torch_ma ||
+ (tps6131x->max_torch_current_ma % tps6131x->step_torch_current_ma)) {
+ dev_err(dev, "led-max-microamp out of range or not a multiple of %u\n",
+ tps6131x->step_torch_current_ma);
+ return -EINVAL;
+ }
+
+ ret = fwnode_property_read_u32(tps6131x->led_node, "flash-max-microamp", &current_ua);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read flash-max-microamp property\n");
+ return ret;
+ }
+
+ tps6131x->max_flash_current_ma = UA_TO_MA(current_ua);
+
+ if (!tps6131x->max_flash_current_ma ||
+ tps6131x->max_flash_current_ma > max_current_flash_ma ||
+ (tps6131x->max_flash_current_ma % tps6131x->step_flash_current_ma)) {
+ dev_err(dev, "flash-max-microamp out of range or not a multiple of %u\n",
+ tps6131x->step_flash_current_ma);
+ return -EINVAL;
+ }
+
+ ret = fwnode_property_read_u32(tps6131x->led_node, "flash-max-timeout-us", &timeout_us);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read flash-max-timeout-us property\n");
+ return ret;
+ }
+
+ timer_config = tps6131x_find_closest_timer_config(timeout_us);
+ tps6131x->max_timeout_us = timer_config->time_us;
+
+ if (tps6131x->max_timeout_us != timeout_us)
+ dev_warn(dev, "flash-max-timeout-us %u not supported (using %u)\n", timeout_us,
+ tps6131x->max_timeout_us);
+
+ return 0;
+}
+
+static int tps6131x_led_class_setup(struct tps6131x *tps6131x)
+{
+ const struct tps6131x_timer_config *timer_config;
+ struct led_classdev *led_cdev;
+ struct led_flash_setting *setting;
+ struct led_init_data init_data = {};
+ int ret;
+
+ tps6131x->fled_cdev.ops = &flash_ops;
+
+ setting = &tps6131x->fled_cdev.timeout;
+ timer_config = tps6131x_find_closest_timer_config(0);
+ setting->min = timer_config->time_us;
+ setting->max = tps6131x->max_timeout_us;
+ setting->step = 1; /* Only some specific time periods are supported. No fixed step size. */
+ setting->val = setting->min;
+
+ setting = &tps6131x->fled_cdev.brightness;
+ setting->min = tps6131x->step_flash_current_ma;
+ setting->max = tps6131x->max_flash_current_ma;
+ setting->step = tps6131x->step_flash_current_ma;
+ setting->val = setting->min;
+
+ led_cdev = &tps6131x->fled_cdev.led_cdev;
+ led_cdev->brightness_set_blocking = tps6131x_brightness_set;
+ led_cdev->max_brightness = tps6131x->max_torch_current_ma;
+ led_cdev->flags |= LED_DEV_CAP_FLASH;
+
+ init_data.fwnode = tps6131x->led_node;
+ init_data.devicename = NULL;
+ init_data.default_label = NULL;
+ init_data.devname_mandatory = false;
+
+ ret = devm_led_classdev_flash_register_ext(tps6131x->dev, &tps6131x->fled_cdev,
+ &init_data);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int tps6131x_flash_external_strobe_set(struct v4l2_flash *v4l2_flash, bool enable)
+{
+ struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev;
+ struct tps6131x *tps6131x = fled_cdev_to_tps6131x(fled_cdev);
+
+ guard(mutex)(&tps6131x->lock);
+
+ return tps6131x_set_mode(tps6131x, enable ? TPS6131X_MODE_FLASH : TPS6131X_MODE_SHUTDOWN,
+ false);
+}
+
+static const struct v4l2_flash_ops tps6131x_v4l2_flash_ops = {
+ .external_strobe_set = tps6131x_flash_external_strobe_set,
+};
+
+static int tps6131x_v4l2_setup(struct tps6131x *tps6131x)
+{
+ struct v4l2_flash_config v4l2_cfg = { 0 };
+ struct led_flash_setting *intensity = &v4l2_cfg.intensity;
+
+ intensity->min = tps6131x->step_torch_current_ma;
+ intensity->max = tps6131x->max_torch_current_ma;
+ intensity->step = tps6131x->step_torch_current_ma;
+ intensity->val = intensity->min;
+
+ strscpy(v4l2_cfg.dev_name, tps6131x->fled_cdev.led_cdev.dev->kobj.name,
+ sizeof(v4l2_cfg.dev_name));
+
+ v4l2_cfg.has_external_strobe = true;
+ v4l2_cfg.flash_faults = LED_FAULT_TIMEOUT | LED_FAULT_OVER_TEMPERATURE |
+ LED_FAULT_SHORT_CIRCUIT | LED_FAULT_UNDER_VOLTAGE |
+ LED_FAULT_LED_OVER_TEMPERATURE;
+
+ tps6131x->v4l2_flash = v4l2_flash_init(tps6131x->dev, tps6131x->led_node,
+ &tps6131x->fled_cdev, &tps6131x_v4l2_flash_ops,
+ &v4l2_cfg);
+ if (IS_ERR(tps6131x->v4l2_flash)) {
+ dev_err(tps6131x->dev, "Failed to initialize v4l2 flash LED\n");
+ return PTR_ERR(tps6131x->v4l2_flash);
+ }
+
+ return 0;
+}
+
+static int tps6131x_probe(struct i2c_client *client)
+{
+ struct tps6131x *tps6131x;
+ int ret;
+
+ tps6131x = devm_kzalloc(&client->dev, sizeof(*tps6131x), GFP_KERNEL);
+ if (!tps6131x)
+ return -ENOMEM;
+
+ tps6131x->dev = &client->dev;
+ i2c_set_clientdata(client, tps6131x);
+ mutex_init(&tps6131x->lock);
+ INIT_DELAYED_WORK(&tps6131x->torch_refresh_work, tps6131x_torch_refresh_handler);
+
+ ret = tps6131x_parse_node(tps6131x);
+ if (ret)
+ return ret;
+
+ tps6131x->regmap = devm_regmap_init_i2c(client, &tps6131x_regmap);
+ if (IS_ERR(tps6131x->regmap)) {
+ ret = PTR_ERR(tps6131x->regmap);
+ return dev_err_probe(&client->dev, ret, "Failed to allocate register map\n");
+ }
+
+ tps6131x->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(tps6131x->reset_gpio)) {
+ ret = PTR_ERR(tps6131x->reset_gpio);
+ return dev_err_probe(&client->dev, ret, "Failed to get reset GPIO\n");
+ }
+
+ ret = tps6131x_reset_chip(tps6131x);
+ if (ret)
+ return dev_err_probe(&client->dev, ret, "Failed to reset LED controller\n");
+
+ ret = tps6131x_init_chip(tps6131x);
+ if (ret)
+ return dev_err_probe(&client->dev, ret, "Failed to initialize LED controller\n");
+
+ ret = tps6131x_led_class_setup(tps6131x);
+ if (ret)
+ return dev_err_probe(&client->dev, ret, "Failed to setup LED class\n");
+
+ ret = tps6131x_v4l2_setup(tps6131x);
+ if (ret)
+ return dev_err_probe(&client->dev, ret, "Failed to setup v4l2 flash\n");
+
+ return 0;
+}
+
+static void tps6131x_remove(struct i2c_client *client)
+{
+ struct tps6131x *tps6131x = i2c_get_clientdata(client);
+
+ v4l2_flash_release(tps6131x->v4l2_flash);
+
+ cancel_delayed_work_sync(&tps6131x->torch_refresh_work);
+}
+
+static const struct of_device_id of_tps6131x_leds_match[] = {
+ { .compatible = "ti,tps61310" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_tps6131x_leds_match);
+
+static struct i2c_driver tps6131x_i2c_driver = {
+ .driver = {
+ .name = "tps6131x",
+ .of_match_table = of_tps6131x_leds_match,
+ },
+ .probe = tps6131x_probe,
+ .remove = tps6131x_remove,
+};
+module_i2c_driver(tps6131x_i2c_driver);
+
+MODULE_DESCRIPTION("Texas Instruments TPS6131X flash LED driver");
+MODULE_AUTHOR("Matthias Fend <matthias.fend@emfend.at>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/led-class-flash.c b/drivers/leds/led-class-flash.c
index f4e26ce84862..165035a8826c 100644
--- a/drivers/leds/led-class-flash.c
+++ b/drivers/leds/led-class-flash.c
@@ -440,6 +440,21 @@ int led_update_flash_brightness(struct led_classdev_flash *fled_cdev)
}
EXPORT_SYMBOL_GPL(led_update_flash_brightness);
+int led_set_flash_duration(struct led_classdev_flash *fled_cdev, u32 duration)
+{
+ struct led_classdev *led_cdev = &fled_cdev->led_cdev;
+ struct led_flash_setting *s = &fled_cdev->duration;
+
+ s->val = duration;
+ led_clamp_align(s);
+
+ if (!(led_cdev->flags & LED_SUSPENDED))
+ return call_flash_op(fled_cdev, duration_set, s->val);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(led_set_flash_duration);
+
MODULE_AUTHOR("Jacek Anaszewski <j.anaszewski@samsung.com>");
MODULE_DESCRIPTION("LED Flash class interface");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/led-class-multicolor.c b/drivers/leds/led-class-multicolor.c
index b2a87c994816..fd66d2bdeace 100644
--- a/drivers/leds/led-class-multicolor.c
+++ b/drivers/leds/led-class-multicolor.c
@@ -59,7 +59,8 @@ static ssize_t multi_intensity_store(struct device *dev,
for (i = 0; i < mcled_cdev->num_colors; i++)
mcled_cdev->subled_info[i].intensity = intensity_value[i];
- led_set_brightness(led_cdev, led_cdev->brightness);
+ if (!test_bit(LED_BLINK_SW, &led_cdev->work_flags))
+ led_set_brightness(led_cdev, led_cdev->brightness);
ret = size;
err_out:
mutex_unlock(&led_cdev->led_access);
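When LED_BLINK_SW is set the software blink timer owns brightness updates, so writing multi_intensity no longer calls led_set_brightness() directly (which could disturb the blink state); the updated per-color intensities simply take effect on the next blink cycle.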
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index 907fc703e0c5..1a59a4f38479 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -529,6 +529,7 @@ int led_compose_name(struct device *dev, struct led_init_data *init_data,
struct led_properties props = {};
struct fwnode_handle *fwnode = init_data->fwnode;
const char *devicename = init_data->devicename;
+ int n;
if (!led_classdev_name)
return -EINVAL;
@@ -542,45 +543,49 @@ int led_compose_name(struct device *dev, struct led_init_data *init_data,
* Otherwise the label is prepended with devicename to compose
* the final LED class device name.
*/
- if (!devicename) {
- strscpy(led_classdev_name, props.label,
- LED_MAX_NAME_SIZE);
+ if (devicename) {
+ n = snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s:%s",
+ devicename, props.label);
} else {
- snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s:%s",
- devicename, props.label);
+ n = snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s", props.label);
}
} else if (props.function || props.color_present) {
char tmp_buf[LED_MAX_NAME_SIZE];
if (props.func_enum_present) {
- snprintf(tmp_buf, LED_MAX_NAME_SIZE, "%s:%s-%d",
- props.color_present ? led_colors[props.color] : "",
- props.function ?: "", props.func_enum);
+ n = snprintf(tmp_buf, LED_MAX_NAME_SIZE, "%s:%s-%d",
+ props.color_present ? led_colors[props.color] : "",
+ props.function ?: "", props.func_enum);
} else {
- snprintf(tmp_buf, LED_MAX_NAME_SIZE, "%s:%s",
- props.color_present ? led_colors[props.color] : "",
- props.function ?: "");
+ n = snprintf(tmp_buf, LED_MAX_NAME_SIZE, "%s:%s",
+ props.color_present ? led_colors[props.color] : "",
+ props.function ?: "");
}
+ if (n >= LED_MAX_NAME_SIZE)
+ return -E2BIG;
+
if (init_data->devname_mandatory) {
- snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s:%s",
- devicename, tmp_buf);
+ n = snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s:%s",
+ devicename, tmp_buf);
} else {
- strscpy(led_classdev_name, tmp_buf, LED_MAX_NAME_SIZE);
-
+ n = snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s", tmp_buf);
}
} else if (init_data->default_label) {
if (!devicename) {
dev_err(dev, "Legacy LED naming requires devicename segment");
return -EINVAL;
}
- snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s:%s",
- devicename, init_data->default_label);
+ n = snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s:%s",
+ devicename, init_data->default_label);
} else if (is_of_node(fwnode)) {
- strscpy(led_classdev_name, to_of_node(fwnode)->name,
- LED_MAX_NAME_SIZE);
+ n = snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s",
+ to_of_node(fwnode)->name);
} else
return -EINVAL;
+ if (n >= LED_MAX_NAME_SIZE)
+ return -E2BIG;
+
return 0;
}
EXPORT_SYMBOL_GPL(led_compose_name);
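These conversions exploit the snprintf() return contract: it reports the length the formatted string would have needed, so one n >= LED_MAX_NAME_SIZE check per branch catches truncation. In miniature (devicename and label stand in for the segments being composed):

	char name[LED_MAX_NAME_SIZE];
	int n;

	n = snprintf(name, sizeof(name), "%s:%s", devicename, label);
	if (n >= LED_MAX_NAME_SIZE)
		return -E2BIG;	/* the composed name would have been truncated */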
diff --git a/drivers/leds/led-test.c b/drivers/leds/led-test.c
new file mode 100644
index 000000000000..ddf9aa967a6a
--- /dev/null
+++ b/drivers/leds/led-test.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2025 Google LLC
+ *
+ * Author: Lee Jones <lee@kernel.org>
+ */
+
+#include <kunit/device.h>
+#include <kunit/test.h>
+#include <linux/device.h>
+#include <linux/leds.h>
+
+#define LED_TEST_POST_REG_BRIGHTNESS 10
+
+struct led_test_ddata {
+ struct led_classdev cdev;
+ struct device *dev;
+};
+
+static enum led_brightness led_test_brightness_get(struct led_classdev *cdev)
+{
+ return LED_TEST_POST_REG_BRIGHTNESS;
+}
+
+static void led_test_class_register(struct kunit *test)
+{
+ struct led_test_ddata *ddata = test->priv;
+ struct led_classdev *cdev_clash, *cdev = &ddata->cdev;
+ struct device *dev = ddata->dev;
+ int ret;
+
+ /* Register a LED class device */
+ cdev->name = "led-test";
+ cdev->brightness_get = led_test_brightness_get;
+ cdev->brightness = 0;
+
+ ret = devm_led_classdev_register(dev, cdev);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_EQ(test, cdev->max_brightness, LED_FULL);
+ KUNIT_EXPECT_EQ(test, cdev->brightness, LED_TEST_POST_REG_BRIGHTNESS);
+ KUNIT_EXPECT_STREQ(test, cdev->dev->kobj.name, "led-test");
+
+ /* Register again with the same name - expect it to pass with the LED renamed */
+ cdev_clash = devm_kmemdup(dev, cdev, sizeof(*cdev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cdev_clash);
+
+ ret = devm_led_classdev_register(dev, cdev_clash);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_STREQ(test, cdev_clash->dev->kobj.name, "led-test_1");
+ KUNIT_EXPECT_STREQ(test, cdev_clash->name, "led-test");
+
+ /* Enable name conflict rejection and register with the same name again - expect failure */
+ cdev_clash->flags |= LED_REJECT_NAME_CONFLICT;
+ ret = devm_led_classdev_register(dev, cdev_clash);
+ KUNIT_EXPECT_EQ(test, ret, -EEXIST);
+}
+
+static void led_test_class_add_lookup_and_get(struct kunit *test)
+{
+ struct led_test_ddata *ddata = test->priv;
+ struct led_classdev *cdev = &ddata->cdev, *cdev_get;
+ struct device *dev = ddata->dev;
+ struct led_lookup_data lookup;
+ int ret;
+
+ /* First, register a LED class device */
+ cdev->name = "led-test";
+ ret = devm_led_classdev_register(dev, cdev);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ /* Then make the LED available for lookup */
+ lookup.provider = cdev->name;
+ lookup.dev_id = dev_name(dev);
+ lookup.con_id = "led-test-1";
+ led_add_lookup(&lookup);
+
+ /* Finally, attempt to look it up via the API - imagine this was an orthogonal driver */
+ cdev_get = devm_led_get(dev, "led-test-1");
+ KUNIT_ASSERT_FALSE(test, IS_ERR(cdev_get));
+
+ KUNIT_EXPECT_STREQ(test, cdev_get->name, cdev->name);
+
+ led_remove_lookup(&lookup);
+}
+
+static struct kunit_case led_test_cases[] = {
+ KUNIT_CASE(led_test_class_register),
+ KUNIT_CASE(led_test_class_add_lookup_and_get),
+ { }
+};
+
+static int led_test_init(struct kunit *test)
+{
+ struct led_test_ddata *ddata;
+ struct device *dev;
+
+ ddata = kunit_kzalloc(test, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ test->priv = ddata;
+
+ dev = kunit_device_register(test, "led_test");
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+
+ ddata->dev = get_device(dev);
+
+ return 0;
+}
+
+static void led_test_exit(struct kunit *test)
+{
+ struct led_test_ddata *ddata = test->priv;
+
+ if (ddata && ddata->dev)
+ put_device(ddata->dev);
+}
+
+static struct kunit_suite led_test_suite = {
+ .name = "led",
+ .init = led_test_init,
+ .exit = led_test_exit,
+ .test_cases = led_test_cases,
+};
+kunit_test_suite(led_test_suite);
+
+MODULE_AUTHOR("Lee Jones <lee@kernel.org>");
+MODULE_DESCRIPTION("KUnit tests for the LED framework");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index b2d40f87a5ff..3799dcc1cf07 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -54,6 +54,11 @@ ssize_t led_trigger_write(struct file *filp, struct kobject *kobj,
goto unlock;
}
+ if (sysfs_streq(buf, "default")) {
+ led_trigger_set_default(led_cdev);
+ goto unlock;
+ }
+
down_read(&triggers_list_lock);
list_for_each_entry(trig, &trigger_list, next_trig) {
if (sysfs_streq(buf, trig->name) && trigger_relevant(led_cdev, trig)) {
@@ -98,6 +103,9 @@ static int led_trigger_format(char *buf, size_t size,
int len = led_trigger_snprintf(buf, size, "%s",
led_cdev->trigger ? "none" : "[none]");
+ if (led_cdev->default_trigger)
+ len += led_trigger_snprintf(buf + len, size - len, " default");
+
list_for_each_entry(trig, &trigger_list, next_trig) {
bool hit;
@@ -281,6 +289,11 @@ void led_trigger_set_default(struct led_classdev *led_cdev)
if (!led_cdev->default_trigger)
return;
+ if (!strcmp(led_cdev->default_trigger, "none")) {
+ led_trigger_remove(led_cdev);
+ return;
+ }
+
down_read(&triggers_list_lock);
down_write(&led_cdev->trigger_lock);
list_for_each_entry(trig, &trigger_list, next_trig) {
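After this change `echo default > /sys/class/leds/<led>/trigger` restores whatever default_trigger the driver declared, the trigger listing advertises the new keyword, and a default_trigger of "none" explicitly removes any active trigger rather than being ignored.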
diff --git a/drivers/leds/leds-cros_ec.c b/drivers/leds/leds-cros_ec.c
index 275522b81ea5..377cf04e202a 100644
--- a/drivers/leds/leds-cros_ec.c
+++ b/drivers/leds/leds-cros_ec.c
@@ -60,31 +60,18 @@ static inline struct cros_ec_led_priv *cros_ec_led_cdev_to_priv(struct led_class
union cros_ec_led_cmd_data {
struct ec_params_led_control req;
struct ec_response_led_control resp;
-} __packed;
+};
static int cros_ec_led_send_cmd(struct cros_ec_device *cros_ec,
union cros_ec_led_cmd_data *arg)
{
int ret;
- struct {
- struct cros_ec_command msg;
- union cros_ec_led_cmd_data data;
- } __packed buf = {
- .msg = {
- .version = 1,
- .command = EC_CMD_LED_CONTROL,
- .insize = sizeof(arg->resp),
- .outsize = sizeof(arg->req),
- },
- .data.req = arg->req
- };
-
- ret = cros_ec_cmd_xfer_status(cros_ec, &buf.msg);
+
+ ret = cros_ec_cmd(cros_ec, 1, EC_CMD_LED_CONTROL, &arg->req,
+ sizeof(arg->req), &arg->resp, sizeof(arg->resp));
if (ret < 0)
return ret;
- arg->resp = buf.data.resp;
-
return 0;
}
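cros_ec_cmd() packs the request, performs the transfer, and copies the response back in one call, which is exactly what the removed on-stack msg-plus-data wrapper did by hand. With the union no longer embedded in a wire-format buffer, its __packed attribute served no purpose and is dropped as well.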
diff --git a/drivers/leds/leds-lp8860.c b/drivers/leds/leds-lp8860.c
index 995f2adf8569..52b97c9f2a03 100644
--- a/drivers/leds/leds-lp8860.c
+++ b/drivers/leds/leds-lp8860.c
@@ -90,8 +90,6 @@
* @led_dev: led class device pointer
* @regmap: Devices register map
* @eeprom_regmap: EEPROM register map
- * @enable_gpio: VDDIO/EN gpio to enable communication interface
- * @regulator: LED supply regulator pointer
*/
struct lp8860_led {
struct mutex lock;
@@ -99,16 +97,9 @@ struct lp8860_led {
struct led_classdev led_dev;
struct regmap *regmap;
struct regmap *eeprom_regmap;
- struct gpio_desc *enable_gpio;
- struct regulator *regulator;
-};
-
-struct lp8860_eeprom_reg {
- uint8_t reg;
- uint8_t value;
};
-static struct lp8860_eeprom_reg lp8860_eeprom_disp_regs[] = {
+static const struct reg_sequence lp8860_eeprom_disp_regs[] = {
{ LP8860_EEPROM_REG_0, 0xed },
{ LP8860_EEPROM_REG_1, 0xdf },
{ LP8860_EEPROM_REG_2, 0xdc },
@@ -136,43 +127,29 @@ static struct lp8860_eeprom_reg lp8860_eeprom_disp_regs[] = {
{ LP8860_EEPROM_REG_24, 0x3E },
};
-static int lp8860_unlock_eeprom(struct lp8860_led *led, int lock)
+static int lp8860_unlock_eeprom(struct lp8860_led *led)
{
int ret;
- mutex_lock(&led->lock);
-
- if (lock == LP8860_UNLOCK_EEPROM) {
- ret = regmap_write(led->regmap,
- LP8860_EEPROM_UNLOCK,
- LP8860_EEPROM_CODE_1);
- if (ret) {
- dev_err(&led->client->dev, "EEPROM Unlock failed\n");
- goto out;
- }
-
- ret = regmap_write(led->regmap,
- LP8860_EEPROM_UNLOCK,
- LP8860_EEPROM_CODE_2);
- if (ret) {
- dev_err(&led->client->dev, "EEPROM Unlock failed\n");
- goto out;
- }
- ret = regmap_write(led->regmap,
- LP8860_EEPROM_UNLOCK,
- LP8860_EEPROM_CODE_3);
- if (ret) {
- dev_err(&led->client->dev, "EEPROM Unlock failed\n");
- goto out;
- }
- } else {
- ret = regmap_write(led->regmap,
- LP8860_EEPROM_UNLOCK,
- LP8860_LOCK_EEPROM);
+ guard(mutex)(&led->lock);
+
+ ret = regmap_write(led->regmap, LP8860_EEPROM_UNLOCK, LP8860_EEPROM_CODE_1);
+ if (ret) {
+ dev_err(&led->client->dev, "EEPROM Unlock failed\n");
+ return ret;
+ }
+
+ ret = regmap_write(led->regmap, LP8860_EEPROM_UNLOCK, LP8860_EEPROM_CODE_2);
+ if (ret) {
+ dev_err(&led->client->dev, "EEPROM Unlock failed\n");
+ return ret;
+ }
+ ret = regmap_write(led->regmap, LP8860_EEPROM_UNLOCK, LP8860_EEPROM_CODE_3);
+ if (ret) {
+ dev_err(&led->client->dev, "EEPROM Unlock failed\n");
+ return ret;
}
-out:
- mutex_unlock(&led->lock);
return ret;
}
@@ -209,47 +186,35 @@ static int lp8860_brightness_set(struct led_classdev *led_cdev,
int disp_brightness = brt_val * 255;
int ret;
- mutex_lock(&led->lock);
+ guard(mutex)(&led->lock);
ret = lp8860_fault_check(led);
if (ret) {
dev_err(&led->client->dev, "Cannot read/clear faults\n");
- goto out;
+ return ret;
}
ret = regmap_write(led->regmap, LP8860_DISP_CL1_BRT_MSB,
(disp_brightness & 0xff00) >> 8);
if (ret) {
dev_err(&led->client->dev, "Cannot write CL1 MSB\n");
- goto out;
+ return ret;
}
ret = regmap_write(led->regmap, LP8860_DISP_CL1_BRT_LSB,
disp_brightness & 0xff);
if (ret) {
dev_err(&led->client->dev, "Cannot write CL1 LSB\n");
- goto out;
+ return ret;
}
-out:
- mutex_unlock(&led->lock);
- return ret;
+
+ return 0;
}
static int lp8860_init(struct lp8860_led *led)
{
unsigned int read_buf;
- int ret, i, reg_count;
-
- if (led->regulator) {
- ret = regulator_enable(led->regulator);
- if (ret) {
- dev_err(&led->client->dev,
- "Failed to enable regulator\n");
- return ret;
- }
- }
-
- gpiod_direction_output(led->enable_gpio, 1);
+ int ret, reg_count;
ret = lp8860_fault_check(led);
if (ret)
@@ -259,24 +224,20 @@ static int lp8860_init(struct lp8860_led *led)
if (ret)
goto out;
- ret = lp8860_unlock_eeprom(led, LP8860_UNLOCK_EEPROM);
+ ret = lp8860_unlock_eeprom(led);
if (ret) {
dev_err(&led->client->dev, "Failed unlocking EEPROM\n");
goto out;
}
reg_count = ARRAY_SIZE(lp8860_eeprom_disp_regs);
- for (i = 0; i < reg_count; i++) {
- ret = regmap_write(led->eeprom_regmap,
- lp8860_eeprom_disp_regs[i].reg,
- lp8860_eeprom_disp_regs[i].value);
- if (ret) {
- dev_err(&led->client->dev, "Failed writing EEPROM\n");
- goto out;
- }
+ ret = regmap_multi_reg_write(led->eeprom_regmap, lp8860_eeprom_disp_regs, reg_count);
+ if (ret) {
+ dev_err(&led->client->dev, "Failed writing EEPROM\n");
+ goto out;
}
- ret = lp8860_unlock_eeprom(led, LP8860_LOCK_EEPROM);
+ ret = regmap_write(led->regmap, LP8860_EEPROM_UNLOCK, LP8860_LOCK_EEPROM);
if (ret)
goto out;
@@ -291,74 +252,14 @@ static int lp8860_init(struct lp8860_led *led)
return ret;
out:
- if (ret)
- gpiod_direction_output(led->enable_gpio, 0);
-
- if (led->regulator) {
- ret = regulator_disable(led->regulator);
- if (ret)
- dev_err(&led->client->dev,
- "Failed to disable regulator\n");
- }
-
return ret;
}
-static const struct reg_default lp8860_reg_defs[] = {
- { LP8860_DISP_CL1_BRT_MSB, 0x00},
- { LP8860_DISP_CL1_BRT_LSB, 0x00},
- { LP8860_DISP_CL1_CURR_MSB, 0x00},
- { LP8860_DISP_CL1_CURR_LSB, 0x00},
- { LP8860_CL2_BRT_MSB, 0x00},
- { LP8860_CL2_BRT_LSB, 0x00},
- { LP8860_CL2_CURRENT, 0x00},
- { LP8860_CL3_BRT_MSB, 0x00},
- { LP8860_CL3_BRT_LSB, 0x00},
- { LP8860_CL3_CURRENT, 0x00},
- { LP8860_CL4_BRT_MSB, 0x00},
- { LP8860_CL4_BRT_LSB, 0x00},
- { LP8860_CL4_CURRENT, 0x00},
- { LP8860_CONFIG, 0x00},
- { LP8860_FAULT_CLEAR, 0x00},
- { LP8860_EEPROM_CNTRL, 0x80},
- { LP8860_EEPROM_UNLOCK, 0x00},
-};
-
static const struct regmap_config lp8860_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = LP8860_EEPROM_UNLOCK,
- .reg_defaults = lp8860_reg_defs,
- .num_reg_defaults = ARRAY_SIZE(lp8860_reg_defs),
-};
-
-static const struct reg_default lp8860_eeprom_defs[] = {
- { LP8860_EEPROM_REG_0, 0x00 },
- { LP8860_EEPROM_REG_1, 0x00 },
- { LP8860_EEPROM_REG_2, 0x00 },
- { LP8860_EEPROM_REG_3, 0x00 },
- { LP8860_EEPROM_REG_4, 0x00 },
- { LP8860_EEPROM_REG_5, 0x00 },
- { LP8860_EEPROM_REG_6, 0x00 },
- { LP8860_EEPROM_REG_7, 0x00 },
- { LP8860_EEPROM_REG_8, 0x00 },
- { LP8860_EEPROM_REG_9, 0x00 },
- { LP8860_EEPROM_REG_10, 0x00 },
- { LP8860_EEPROM_REG_11, 0x00 },
- { LP8860_EEPROM_REG_12, 0x00 },
- { LP8860_EEPROM_REG_13, 0x00 },
- { LP8860_EEPROM_REG_14, 0x00 },
- { LP8860_EEPROM_REG_15, 0x00 },
- { LP8860_EEPROM_REG_16, 0x00 },
- { LP8860_EEPROM_REG_17, 0x00 },
- { LP8860_EEPROM_REG_18, 0x00 },
- { LP8860_EEPROM_REG_19, 0x00 },
- { LP8860_EEPROM_REG_20, 0x00 },
- { LP8860_EEPROM_REG_21, 0x00 },
- { LP8860_EEPROM_REG_22, 0x00 },
- { LP8860_EEPROM_REG_23, 0x00 },
- { LP8860_EEPROM_REG_24, 0x00 },
};
static const struct regmap_config lp8860_eeprom_regmap_config = {
@@ -366,10 +267,15 @@ static const struct regmap_config lp8860_eeprom_regmap_config = {
.val_bits = 8,
.max_register = LP8860_EEPROM_REG_24,
- .reg_defaults = lp8860_eeprom_defs,
- .num_reg_defaults = ARRAY_SIZE(lp8860_eeprom_defs),
};
+static void lp8860_disable_gpio(void *data)
+{
+ struct gpio_desc *gpio = data;
+
+ gpiod_set_value(gpio, 0);
+}
+
static int lp8860_probe(struct i2c_client *client)
{
int ret;
@@ -377,6 +283,7 @@ static int lp8860_probe(struct i2c_client *client)
struct device_node *np = dev_of_node(&client->dev);
struct device_node *child_node;
struct led_init_data init_data = {};
+ struct gpio_desc *enable_gpio;
led = devm_kzalloc(&client->dev, sizeof(*led), GFP_KERNEL);
if (!led)
@@ -386,24 +293,21 @@ static int lp8860_probe(struct i2c_client *client)
if (!child_node)
return -EINVAL;
- led->enable_gpio = devm_gpiod_get_optional(&client->dev,
- "enable", GPIOD_OUT_LOW);
- if (IS_ERR(led->enable_gpio)) {
- ret = PTR_ERR(led->enable_gpio);
- dev_err(&client->dev, "Failed to get enable gpio: %d\n", ret);
- return ret;
- }
+ enable_gpio = devm_gpiod_get_optional(&client->dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(enable_gpio))
+ return dev_err_probe(&client->dev, PTR_ERR(enable_gpio),
+ "Failed to get enable GPIO\n");
+ devm_add_action_or_reset(&client->dev, lp8860_disable_gpio, enable_gpio);
- led->regulator = devm_regulator_get(&client->dev, "vled");
- if (IS_ERR(led->regulator))
- led->regulator = NULL;
+ ret = devm_regulator_get_enable_optional(&client->dev, "vled");
+ if (ret && ret != -ENODEV)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to enable vled regulator\n");
led->client = client;
led->led_dev.brightness_set_blocking = lp8860_brightness_set;
- mutex_init(&led->lock);
-
- i2c_set_clientdata(client, led);
+ devm_mutex_init(&client->dev, &led->lock);
led->regmap = devm_regmap_init_i2c(client, &lp8860_regmap_config);
if (IS_ERR(led->regmap)) {
@@ -439,23 +343,6 @@ static int lp8860_probe(struct i2c_client *client)
return 0;
}
-static void lp8860_remove(struct i2c_client *client)
-{
- struct lp8860_led *led = i2c_get_clientdata(client);
- int ret;
-
- gpiod_direction_output(led->enable_gpio, 0);
-
- if (led->regulator) {
- ret = regulator_disable(led->regulator);
- if (ret)
- dev_err(&led->client->dev,
- "Failed to disable regulator\n");
- }
-
- mutex_destroy(&led->lock);
-}
-
static const struct i2c_device_id lp8860_id[] = {
{ "lp8860" },
{ }
@@ -474,7 +361,6 @@ static struct i2c_driver lp8860_driver = {
.of_match_table = of_lp8860_leds_match,
},
.probe = lp8860_probe,
- .remove = lp8860_remove,
.id_table = lp8860_id,
};
module_i2c_driver(lp8860_driver);
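Dropping the .remove() callback works because every teardown step is now device-managed: devm_mutex_init(), devm_regulator_get_enable_optional(), and a devm action that drives the enable GPIO low on unbind. The action pattern in isolation (my_disable_gpio is an illustrative name):

	static void my_disable_gpio(void *data)
	{
		gpiod_set_value((struct gpio_desc *)data, 0);
	}

	/* in probe(); the action also runs if a later probe step fails */
	ret = devm_add_action_or_reset(dev, my_disable_gpio, gpio);
	if (ret)
		return ret;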
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 1b47acf54720..7d4c071a6cd0 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -318,7 +318,8 @@ static int pca9532_gpio_request_pin(struct gpio_chip *gc, unsigned offset)
return -EBUSY;
}
-static void pca9532_gpio_set_value(struct gpio_chip *gc, unsigned offset, int val)
+static int pca9532_gpio_set_value(struct gpio_chip *gc, unsigned int offset,
+ int val)
{
struct pca9532_data *data = gpiochip_get_data(gc);
struct pca9532_led *led = &data->leds[offset];
@@ -329,6 +330,8 @@ static void pca9532_gpio_set_value(struct gpio_chip *gc, unsigned offset, int va
led->state = PCA9532_OFF;
pca9532_setled(led);
+
+ return 0;
}
static int pca9532_gpio_get_value(struct gpio_chip *gc, unsigned offset)
@@ -351,9 +354,7 @@ static int pca9532_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
static int pca9532_gpio_direction_output(struct gpio_chip *gc, unsigned offset, int val)
{
- pca9532_gpio_set_value(gc, offset, val);
-
- return 0;
+ return pca9532_gpio_set_value(gc, offset, val);
}
#endif /* CONFIG_LEDS_PCA9532_GPIO */
@@ -472,7 +473,7 @@ static int pca9532_configure(struct i2c_client *client,
data->gpio.label = "gpio-pca9532";
data->gpio.direction_input = pca9532_gpio_direction_input;
data->gpio.direction_output = pca9532_gpio_direction_output;
- data->gpio.set = pca9532_gpio_set_value;
+ data->gpio.set_rv = pca9532_gpio_set_value;
data->gpio.get = pca9532_gpio_get_value;
data->gpio.request = pca9532_gpio_request_pin;
data->gpio.can_sleep = 1;
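The .set -> .set_rv change lets bus errors propagate to gpiolib callers instead of being silently dropped. A sketch of the int-returning setter shape, assuming a hypothetical regmap-backed expander (register 0x0 and the demo_* names are illustrative):

#include <linux/gpio/driver.h>
#include <linux/regmap.h>

struct demo_chip {
        struct gpio_chip gc;
        struct regmap *map;
};

static int demo_gpio_set(struct gpio_chip *gc, unsigned int offset, int val)
{
        struct demo_chip *demo = gpiochip_get_data(gc);

        /* An I2C/SPI failure now reaches the gpiolib caller. */
        return regmap_update_bits(demo->map, 0x0, BIT(offset),
                                  val ? BIT(offset) : 0);
}

static void demo_chip_init(struct demo_chip *demo)
{
        demo->gc.set_rv = demo_gpio_set;        /* replaces the void .set */
}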
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index e9cfde9fe4b1..42fe056b1c74 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -73,7 +73,7 @@ enum pca955x_type {
};
struct pca955x_chipdef {
- int bits;
+ u8 bits;
u8 slv_addr; /* 7-bit slave address mask */
int slv_addr_shift; /* Number of bits to ignore */
int blink_div; /* PSC divider */
@@ -142,13 +142,13 @@ struct pca955x_platform_data {
};
/* 8 bits per input register */
-static inline int pca955x_num_input_regs(int bits)
+static inline u8 pca955x_num_input_regs(u8 bits)
{
return (bits + 7) / 8;
}
/* 4 bits per LED selector register */
-static inline int pca955x_num_led_regs(int bits)
+static inline u8 pca955x_num_led_regs(u8 bits)
{
return (bits + 3) / 4;
}
@@ -495,10 +495,10 @@ static int pca955x_set_value(struct gpio_chip *gc, unsigned int offset,
return pca955x_led_set(&led->led_cdev, PCA955X_GPIO_LOW);
}
-static void pca955x_gpio_set_value(struct gpio_chip *gc, unsigned int offset,
- int val)
+static int pca955x_gpio_set_value(struct gpio_chip *gc, unsigned int offset,
+ int val)
{
- pca955x_set_value(gc, offset, val);
+ return pca955x_set_value(gc, offset, val);
}
static int pca955x_gpio_get_value(struct gpio_chip *gc, unsigned int offset)
@@ -581,14 +581,14 @@ static int pca955x_probe(struct i2c_client *client)
struct led_classdev *led;
struct led_init_data init_data;
struct i2c_adapter *adapter;
- int i, bit, err, nls, reg;
+ u8 i, nls, psc0;
u8 ls1[4];
u8 ls2[4];
struct pca955x_platform_data *pdata;
- u8 psc0;
bool keep_psc0 = false;
bool set_default_label = false;
char default_label[8];
+ int bit, err, reg;
chip = i2c_get_match_data(client);
if (!chip)
@@ -610,16 +610,15 @@ static int pca955x_probe(struct i2c_client *client)
return -ENODEV;
}
- dev_info(&client->dev, "leds-pca955x: Using %s %d-bit LED driver at "
- "slave address 0x%02x\n", client->name, chip->bits,
- client->addr);
+ dev_info(&client->dev, "Using %s %u-bit LED driver at slave address 0x%02x\n",
+ client->name, chip->bits, client->addr);
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -EIO;
if (pdata->num_leds != chip->bits) {
dev_err(&client->dev,
- "board info claims %d LEDs on a %d-bit chip\n",
+ "board info claims %d LEDs on a %u-bit chip\n",
pdata->num_leds, chip->bits);
return -ENODEV;
}
@@ -694,8 +693,7 @@ static int pca955x_probe(struct i2c_client *client)
}
if (set_default_label) {
- snprintf(default_label, sizeof(default_label),
- "%d", i);
+ snprintf(default_label, sizeof(default_label), "%u", i);
init_data.default_label = default_label;
} else {
init_data.default_label = NULL;
@@ -739,7 +737,7 @@ static int pca955x_probe(struct i2c_client *client)
pca955x->gpio.label = "gpio-pca955x";
pca955x->gpio.direction_input = pca955x_gpio_direction_input;
pca955x->gpio.direction_output = pca955x_gpio_direction_output;
- pca955x->gpio.set = pca955x_gpio_set_value;
+ pca955x->gpio.set_rv = pca955x_gpio_set_value;
pca955x->gpio.get = pca955x_gpio_get_value;
pca955x->gpio.request = pca955x_gpio_request_pin;
pca955x->gpio.free = pca955x_gpio_free_pin;
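Both register-count helpers above are ceiling divisions over the chip's bit width; written generically (demo_* is illustrative):

/* ceil(bits / per_reg) without floating point */
static inline u8 demo_ceil_div(u8 bits, u8 per_reg)
{
        return (bits + per_reg - 1) / per_reg;
}

/* e.g. a 10-bit chip: (10 + 7) / 8 = 2 input regs, (10 + 3) / 4 = 3 LS regs */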
diff --git a/drivers/leds/leds-pca995x.c b/drivers/leds/leds-pca995x.c
index 11c7bb69573e..6ad06ce2bf64 100644
--- a/drivers/leds/leds-pca995x.c
+++ b/drivers/leds/leds-pca995x.c
@@ -197,7 +197,7 @@ MODULE_DEVICE_TABLE(i2c, pca995x_id);
static const struct of_device_id pca995x_of_match[] = {
{ .compatible = "nxp,pca9952", .data = &pca9952_chipdef },
- { .compatible = "nxp,pca9955b", . data = &pca9955b_chipdef },
+ { .compatible = "nxp,pca9955b", .data = &pca9955b_chipdef },
{ .compatible = "nxp,pca9956b", .data = &pca9956b_chipdef },
{},
};
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
index acbd8169723c..89c165c8ee9c 100644
--- a/drivers/leds/leds-tca6507.c
+++ b/drivers/leds/leds-tca6507.c
@@ -588,8 +588,8 @@ static int tca6507_blink_set(struct led_classdev *led_cdev,
}
#ifdef CONFIG_GPIOLIB
-static void tca6507_gpio_set_value(struct gpio_chip *gc,
- unsigned offset, int val)
+static int tca6507_gpio_set_value(struct gpio_chip *gc, unsigned int offset,
+ int val)
{
struct tca6507_chip *tca = gpiochip_get_data(gc);
unsigned long flags;
@@ -604,13 +604,14 @@ static void tca6507_gpio_set_value(struct gpio_chip *gc,
spin_unlock_irqrestore(&tca->lock, flags);
if (tca->reg_set)
schedule_work(&tca->work);
+
+ return 0;
}
static int tca6507_gpio_direction_output(struct gpio_chip *gc,
unsigned offset, int val)
{
- tca6507_gpio_set_value(gc, offset, val);
- return 0;
+ return tca6507_gpio_set_value(gc, offset, val);
}
static int tca6507_probe_gpios(struct device *dev,
@@ -636,7 +637,7 @@ static int tca6507_probe_gpios(struct device *dev,
tca->gpio.base = -1;
tca->gpio.owner = THIS_MODULE;
tca->gpio.direction_output = tca6507_gpio_direction_output;
- tca->gpio.set = tca6507_gpio_set_value;
+ tca->gpio.set_rv = tca6507_gpio_set_value;
tca->gpio.parent = dev;
err = devm_gpiochip_add_data(dev, &tca->gpio, tca);
if (err) {
diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c
index 4fe1a9c0bc1b..25ee5c1eb820 100644
--- a/drivers/leds/leds-turris-omnia.c
+++ b/drivers/leds/leds-turris-omnia.c
@@ -361,7 +361,7 @@ static DEVICE_ATTR_RW(gamma_correction);
static struct attribute *omnia_led_controller_attrs[] = {
&dev_attr_brightness.attr,
&dev_attr_gamma_correction.attr,
- NULL,
+ NULL
};
ATTRIBUTE_GROUPS(omnia_led_controller);
@@ -527,7 +527,7 @@ static void omnia_leds_remove(struct i2c_client *client)
static const struct of_device_id of_omnia_leds_match[] = {
{ .compatible = "cznic,turris-omnia-leds", },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, of_omnia_leds_match);
diff --git a/drivers/leds/rgb/leds-mt6370-rgb.c b/drivers/leds/rgb/leds-mt6370-rgb.c
index ebd3ba878dd5..c5927d0eb830 100644
--- a/drivers/leds/rgb/leds-mt6370-rgb.c
+++ b/drivers/leds/rgb/leds-mt6370-rgb.c
@@ -199,17 +199,17 @@ static const struct reg_field mt6372_reg_fields[F_MAX_FIELDS] = {
/* Current unit: microamp, time unit: millisecond */
static const struct linear_range common_led_ranges[R_MAX_RANGES] = {
- [R_LED123_CURR] = { 4000, 1, 6, 4000 },
- [R_LED4_CURR] = { 2000, 1, 3, 2000 },
- [R_LED_TRFON] = { 125, 0, 15, 200 },
- [R_LED_TOFF] = { 250, 0, 15, 400 },
+ [R_LED123_CURR] = LINEAR_RANGE(4000, 1, 6, 4000),
+ [R_LED4_CURR] = LINEAR_RANGE(2000, 1, 3, 2000),
+ [R_LED_TRFON] = LINEAR_RANGE(125, 0, 15, 200),
+ [R_LED_TOFF] = LINEAR_RANGE(250, 0, 15, 400),
};
static const struct linear_range mt6372_led_ranges[R_MAX_RANGES] = {
- [R_LED123_CURR] = { 2000, 1, 14, 2000 },
- [R_LED4_CURR] = { 2000, 1, 14, 2000 },
- [R_LED_TRFON] = { 125, 0, 15, 250 },
- [R_LED_TOFF] = { 250, 0, 15, 500 },
+ [R_LED123_CURR] = LINEAR_RANGE(2000, 1, 14, 2000),
+ [R_LED4_CURR] = LINEAR_RANGE(2000, 1, 14, 2000),
+ [R_LED_TRFON] = LINEAR_RANGE(125, 0, 15, 250),
+ [R_LED_TOFF] = LINEAR_RANGE(250, 0, 15, 500),
};
static const unsigned int common_tfreqs[] = {
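LINEAR_RANGE(min, min_sel, max_sel, step) only re-expresses the open-coded initializers; a selector decodes as min + (sel - min_sel) * step. A sketch using the first range above (demo_* names are illustrative):

#include <linux/linear_range.h>

/* 4000 uA at selector 1, up to 24000 uA at selector 6, 4000 uA steps */
static const struct linear_range demo_curr = LINEAR_RANGE(4000, 1, 6, 4000);

static int demo_sel_to_ua(unsigned int sel, unsigned int *ua)
{
        /* *ua = 4000 + (sel - 1) * 4000, with bounds checking */
        return linear_range_get_value(&demo_curr, sel, ua);
}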
diff --git a/drivers/leds/rgb/leds-ncp5623.c b/drivers/leds/rgb/leds-ncp5623.c
index f18156683375..7c7d44623a9e 100644
--- a/drivers/leds/rgb/leds-ncp5623.c
+++ b/drivers/leds/rgb/leds-ncp5623.c
@@ -155,9 +155,9 @@ static int ncp5623_probe(struct i2c_client *client)
struct device *dev = &client->dev;
struct fwnode_handle *mc_node, *led_node;
struct led_init_data init_data = { };
- int num_subleds = 0;
struct ncp5623 *ncp;
struct mc_subled *subled_info;
+ unsigned int num_subleds;
u32 color_index;
u32 reg;
int ret;
@@ -172,8 +172,7 @@ static int ncp5623_probe(struct i2c_client *client)
if (!mc_node)
return -EINVAL;
- fwnode_for_each_child_node(mc_node, led_node)
- num_subleds++;
+ num_subleds = fwnode_get_child_node_count(mc_node);
subled_info = devm_kcalloc(dev, num_subleds, sizeof(*subled_info), GFP_KERNEL);
if (!subled_info) {
diff --git a/drivers/leds/rgb/leds-pwm-multicolor.c b/drivers/leds/rgb/leds-pwm-multicolor.c
index 1c7705bafdfc..e0d7d3c9215c 100644
--- a/drivers/leds/rgb/leds-pwm-multicolor.c
+++ b/drivers/leds/rgb/leds-pwm-multicolor.c
@@ -107,12 +107,12 @@ release_fwnode:
static int led_pwm_mc_probe(struct platform_device *pdev)
{
- struct fwnode_handle *mcnode, *fwnode;
+ struct fwnode_handle *mcnode;
struct led_init_data init_data = {};
struct led_classdev *cdev;
struct mc_subled *subled;
struct pwm_mc_led *priv;
- int count = 0;
+ unsigned int count;
int ret = 0;
mcnode = device_get_named_child_node(&pdev->dev, "multi-led");
@@ -121,8 +121,7 @@ static int led_pwm_mc_probe(struct platform_device *pdev)
"expected multi-led node\n");
/* count the nodes inside the multi-led node */
- fwnode_for_each_child_node(mcnode, fwnode)
- count++;
+ count = fwnode_get_child_node_count(mcnode);
priv = devm_kzalloc(&pdev->dev, struct_size(priv, leds, count),
GFP_KERNEL);
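Both hunks above drop the same open-coded counting loop in favour of one property.h helper; the equivalence is simply:

#include <linux/property.h>

static unsigned int demo_count_children(struct fwnode_handle *node)
{
        /*
         * Replaces:
         *      fwnode_for_each_child_node(node, child)
         *              count++;
         */
        return fwnode_get_child_node_count(node);
}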
diff --git a/drivers/leds/trigger/ledtrig-backlight.c b/drivers/leds/trigger/ledtrig-backlight.c
index 487577d22cfc..c1f0f5becaee 100644
--- a/drivers/leds/trigger/ledtrig-backlight.c
+++ b/drivers/leds/trigger/ledtrig-backlight.c
@@ -10,7 +10,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
-#include <linux/fb.h>
#include <linux/leds.h>
#include "../leds.h"
@@ -21,29 +20,20 @@ struct bl_trig_notifier {
struct led_classdev *led;
int brightness;
int old_status;
- struct notifier_block notifier;
unsigned invert;
+
+ struct list_head entry;
};
-static int fb_notifier_callback(struct notifier_block *p,
- unsigned long event, void *data)
+static DEFINE_MUTEX(ledtrig_backlight_list_mutex);
+static LIST_HEAD(ledtrig_backlight_list);
+
+static void ledtrig_backlight_notify_blank(struct bl_trig_notifier *n, int new_status)
{
- struct bl_trig_notifier *n = container_of(p,
- struct bl_trig_notifier, notifier);
struct led_classdev *led = n->led;
- struct fb_event *fb_event = data;
- int *blank;
- int new_status;
-
- /* If we aren't interested in this event, skip it immediately ... */
- if (event != FB_EVENT_BLANK)
- return 0;
-
- blank = fb_event->data;
- new_status = *blank ? BLANK : UNBLANK;
if (new_status == n->old_status)
- return 0;
+ return;
if ((n->old_status == UNBLANK) ^ n->invert) {
n->brightness = led->brightness;
@@ -53,9 +43,19 @@ static int fb_notifier_callback(struct notifier_block *p,
}
n->old_status = new_status;
+}
- return 0;
+void ledtrig_backlight_blank(bool blank)
+{
+ struct bl_trig_notifier *n;
+ int new_status = blank ? BLANK : UNBLANK;
+
+ guard(mutex)(&ledtrig_backlight_list_mutex);
+
+ list_for_each_entry(n, &ledtrig_backlight_list, entry)
+ ledtrig_backlight_notify_blank(n, new_status);
}
+EXPORT_SYMBOL(ledtrig_backlight_blank);
static ssize_t bl_trig_invert_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -100,8 +100,6 @@ ATTRIBUTE_GROUPS(bl_trig);
static int bl_trig_activate(struct led_classdev *led)
{
- int ret;
-
struct bl_trig_notifier *n;
n = kzalloc(sizeof(struct bl_trig_notifier), GFP_KERNEL);
@@ -112,11 +110,9 @@ static int bl_trig_activate(struct led_classdev *led)
n->led = led;
n->brightness = led->brightness;
n->old_status = UNBLANK;
- n->notifier.notifier_call = fb_notifier_callback;
- ret = fb_register_client(&n->notifier);
- if (ret)
- dev_err(led->dev, "unable to register backlight trigger\n");
+ guard(mutex)(&ledtrig_backlight_list_mutex);
+ list_add(&n->entry, &ledtrig_backlight_list);
return 0;
}
@@ -125,7 +121,9 @@ static void bl_trig_deactivate(struct led_classdev *led)
{
struct bl_trig_notifier *n = led_get_trigger_data(led);
- fb_unregister_client(&n->notifier);
+ guard(mutex)(&ledtrig_backlight_list_mutex);
+ list_del(&n->entry);
+
kfree(n);
}
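With the fbdev notifier gone, display code is expected to call the exported hook directly. A hedged sketch of a hypothetical caller (demo_* names are not from any driver):

#include <linux/leds.h>

static void demo_display_set_power(bool on)
{
        /* ... program the panel and backlight hardware ... */

        /*
         * Tell every LED bound to the "backlight" trigger about the new
         * state; this replaces the old FB_EVENT_BLANK notifier chain.
         */
        ledtrig_backlight_blank(!on);
}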
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index ed52db272f4d..68eeed660a4a 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -36,6 +36,16 @@ config ARM_MHU_V3
that provides different means of transports: supported extensions
will be discovered and possibly managed at probe-time.
+config CV1800_MBOX
+ tristate "cv1800 mailbox"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ help
+ Mailbox driver implementation for Sophgo CV18XX SoCs. This driver
+ can be used to send messages between the different processors in the
+ SoC. Any processor can write data to a channel and set the
+ corresponding register to raise an interrupt to notify another
+ processor; a processor may also send data to itself.
+
config EXYNOS_MBOX
tristate "Exynos Mailbox"
depends on ARCH_EXYNOS || COMPILE_TEST
@@ -191,8 +201,8 @@ config POLARFIRE_SOC_MAILBOX
config MCHP_SBI_IPC_MBOX
tristate "Microchip Inter-processor Communication (IPC) SBI driver"
- depends on RISCV_SBI || COMPILE_TEST
- depends on ARCH_MICROCHIP
+ depends on RISCV_SBI
+ depends on ARCH_MICROCHIP || COMPILE_TEST
help
Mailbox implementation for Microchip devices with an
Inter-process communication (IPC) controller.
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 9a1542b55539..13a3448b3271 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -11,6 +11,8 @@ obj-$(CONFIG_ARM_MHU_V2) += arm_mhuv2.o
obj-$(CONFIG_ARM_MHU_V3) += arm_mhuv3.o
+obj-$(CONFIG_CV1800_MBOX) += cv1800-mailbox.o
+
obj-$(CONFIG_EXYNOS_MBOX) += exynos-mailbox.o
obj-$(CONFIG_IMX_MBOX) += imx-mailbox.o
diff --git a/drivers/mailbox/cv1800-mailbox.c b/drivers/mailbox/cv1800-mailbox.c
new file mode 100644
index 000000000000..4761191acf78
--- /dev/null
+++ b/drivers/mailbox/cv1800-mailbox.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2024 Sophgo Technology Inc.
+ * Copyright (C) 2024 Yuntao Dai <d1581209858@live.com>
+ * Copyright (C) 2025 Junhui Liu <junhui.liu@pigmoral.tech>
+ */
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kfifo.h>
+#include <linux/mailbox_client.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define RECV_CPU 1
+
+#define MAILBOX_MAX_CHAN 8
+#define MAILBOX_MSG_LEN 8
+
+#define MBOX_EN_REG(cpu) (cpu << 2)
+#define MBOX_DONE_REG(cpu) ((cpu << 2) + 2)
+#define MBOX_SET_CLR_REG(cpu) (0x10 + (cpu << 4))
+#define MBOX_SET_INT_REG(cpu) (0x18 + (cpu << 4))
+#define MBOX_SET_REG 0x60
+
+#define MAILBOX_CONTEXT_OFFSET 0x0400
+#define MAILBOX_CONTEXT_SIZE 0x0040
+
+#define MBOX_CONTEXT_BASE_INDEX(base, index) \
+ ((u64 __iomem *)(base + MAILBOX_CONTEXT_OFFSET) + index)
+
+/**
+ * struct cv1800_mbox_chan_priv - cv1800 mailbox channel private data
+ * @idx: index of the channel
+ * @cpu: identifier of the processor this channel sends to
+ */
+struct cv1800_mbox_chan_priv {
+ int idx;
+ int cpu;
+};
+
+struct cv1800_mbox {
+ struct mbox_controller mbox;
+ struct cv1800_mbox_chan_priv priv[MAILBOX_MAX_CHAN];
+ struct mbox_chan chans[MAILBOX_MAX_CHAN];
+ u64 __iomem *content[MAILBOX_MAX_CHAN];
+ void __iomem *mbox_base;
+ int recvid;
+};
+
+static irqreturn_t cv1800_mbox_isr(int irq, void *dev_id)
+{
+ struct cv1800_mbox *mbox = (struct cv1800_mbox *)dev_id;
+ size_t i;
+ u64 msg;
+ int ret = IRQ_NONE;
+
+ for (i = 0; i < MAILBOX_MAX_CHAN; i++) {
+ if (mbox->content[i] && mbox->chans[i].cl) {
+ memcpy_fromio(&msg, mbox->content[i], MAILBOX_MSG_LEN);
+ mbox->content[i] = NULL;
+ mbox_chan_received_data(&mbox->chans[i], (void *)&msg);
+ ret = IRQ_HANDLED;
+ }
+ }
+
+ return ret;
+}
+
+static irqreturn_t cv1800_mbox_irq(int irq, void *dev_id)
+{
+ struct cv1800_mbox *mbox = (struct cv1800_mbox *)dev_id;
+ u8 set, valid;
+ size_t i;
+ int ret = IRQ_NONE;
+
+ set = readb(mbox->mbox_base + MBOX_SET_INT_REG(RECV_CPU));
+
+ if (!set)
+ return ret;
+
+ for (i = 0; i < MAILBOX_MAX_CHAN; i++) {
+ valid = set & BIT(i);
+ if (valid) {
+ mbox->content[i] =
+ MBOX_CONTEXT_BASE_INDEX(mbox->mbox_base, i);
+ writeb(valid, mbox->mbox_base +
+ MBOX_SET_CLR_REG(RECV_CPU));
+ writeb(~valid, mbox->mbox_base + MBOX_EN_REG(RECV_CPU));
+ ret = IRQ_WAKE_THREAD;
+ }
+ }
+
+ return ret;
+}
+
+static int cv1800_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct cv1800_mbox_chan_priv *priv =
+ (struct cv1800_mbox_chan_priv *)chan->con_priv;
+ struct cv1800_mbox *mbox = dev_get_drvdata(chan->mbox->dev);
+ int idx = priv->idx;
+ int cpu = priv->cpu;
+ u8 en, valid;
+
+ memcpy_toio(MBOX_CONTEXT_BASE_INDEX(mbox->mbox_base, idx),
+ data, MAILBOX_MSG_LEN);
+
+ valid = BIT(idx);
+ writeb(valid, mbox->mbox_base + MBOX_SET_CLR_REG(cpu));
+ en = readb(mbox->mbox_base + MBOX_EN_REG(cpu));
+ writeb(en | valid, mbox->mbox_base + MBOX_EN_REG(cpu));
+ writeb(valid, mbox->mbox_base + MBOX_SET_REG);
+
+ return 0;
+}
+
+static bool cv1800_last_tx_done(struct mbox_chan *chan)
+{
+ struct cv1800_mbox_chan_priv *priv =
+ (struct cv1800_mbox_chan_priv *)chan->con_priv;
+ struct cv1800_mbox *mbox = dev_get_drvdata(chan->mbox->dev);
+ u8 en;
+
+ en = readb(mbox->mbox_base + MBOX_EN_REG(priv->cpu));
+
+ return !(en & BIT(priv->idx));
+}
+
+static const struct mbox_chan_ops cv1800_mbox_chan_ops = {
+ .send_data = cv1800_mbox_send_data,
+ .last_tx_done = cv1800_last_tx_done,
+};
+
+static struct mbox_chan *cv1800_mbox_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *spec)
+{
+ struct cv1800_mbox_chan_priv *priv;
+
+ int idx = spec->args[0];
+ int cpu = spec->args[1];
+
+ if (idx >= mbox->num_chans)
+ return ERR_PTR(-EINVAL);
+
+ priv = mbox->chans[idx].con_priv;
+ priv->cpu = cpu;
+
+ return &mbox->chans[idx];
+}
+
+static const struct of_device_id cv1800_mbox_of_match[] = {
+ { .compatible = "sophgo,cv1800b-mailbox", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cv1800_mbox_of_match);
+
+static int cv1800_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cv1800_mbox *mb;
+ int irq, idx, err;
+
+ mb = devm_kzalloc(dev, sizeof(*mb), GFP_KERNEL);
+ if (!mb)
+ return -ENOMEM;
+
+ mb->mbox_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mb->mbox_base))
+ return dev_err_probe(dev, PTR_ERR(mb->mbox_base),
+ "Failed to map resource\n");
+
+ mb->mbox.dev = dev;
+ mb->mbox.chans = mb->chans;
+ mb->mbox.txdone_poll = true;
+ mb->mbox.ops = &cv1800_mbox_chan_ops;
+ mb->mbox.num_chans = MAILBOX_MAX_CHAN;
+ mb->mbox.of_xlate = cv1800_mbox_xlate;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_threaded_irq(dev, irq, cv1800_mbox_irq,
+ cv1800_mbox_isr, IRQF_ONESHOT,
+ dev_name(&pdev->dev), mb);
+ if (err < 0)
+ return dev_err_probe(dev, err, "Failed to register irq\n");
+
+ for (idx = 0; idx < MAILBOX_MAX_CHAN; idx++) {
+ mb->priv[idx].idx = idx;
+ mb->mbox.chans[idx].con_priv = &mb->priv[idx];
+ }
+
+ platform_set_drvdata(pdev, mb);
+
+ err = devm_mbox_controller_register(dev, &mb->mbox);
+ if (err)
+ return dev_err_probe(dev, err, "Failed to register mailbox\n");
+
+ return 0;
+}
+
+static struct platform_driver cv1800_mbox_driver = {
+ .driver = {
+ .name = "cv1800-mbox",
+ .of_match_table = cv1800_mbox_of_match,
+ },
+ .probe = cv1800_mbox_probe,
+};
+
+module_platform_driver(cv1800_mbox_driver);
+
+MODULE_DESCRIPTION("cv1800 mailbox driver");
+MODULE_LICENSE("GPL");
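A consumer of this controller goes through the generic mailbox client API; a minimal sketch (demo_* names are hypothetical, channel layout per cv1800_mbox_xlate() above):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mailbox_client.h>

static void demo_rx(struct mbox_client *cl, void *msg)
{
        /* msg points at the 8-byte payload copied out in cv1800_mbox_isr() */
        dev_info(cl->dev, "received %016llx\n", *(u64 *)msg);
}

static int demo_send(struct device *dev)
{
        struct mbox_client cl = {
                .dev = dev,
                .rx_callback = demo_rx,
                .tx_block = true,
                .tx_tout = 100,                 /* ms */
        };
        struct mbox_chan *chan;
        u64 payload = 0x1234;
        int ret;

        chan = mbox_request_channel(&cl, 0);    /* first "mboxes" entry */
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        /* MAILBOX_MSG_LEN (8) bytes are copied into the channel context */
        ret = mbox_send_message(chan, &payload);
        mbox_free_channel(chan);
        return ret < 0 ? ret : 0;
}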
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index 6ef8338add0d..6778afc64a04 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -226,7 +226,7 @@ static int imx_mu_generic_tx(struct imx_mu_priv *priv,
{
u32 *arg = data;
u32 val;
- int ret;
+ int ret, count;
switch (cp->type) {
case IMX_MU_TYPE_TX:
@@ -240,11 +240,20 @@ static int imx_mu_generic_tx(struct imx_mu_priv *priv,
case IMX_MU_TYPE_TXDB_V2:
imx_mu_write(priv, IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx),
priv->dcfg->xCR[IMX_MU_GCR]);
- ret = readl_poll_timeout(priv->base + priv->dcfg->xCR[IMX_MU_GCR], val,
- !(val & IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx)),
- 0, 1000);
- if (ret)
- dev_warn_ratelimited(priv->dev, "channel type: %d failure\n", cp->type);
+ ret = -ETIMEDOUT;
+ count = 0;
+ while (ret && (count < 10)) {
+ ret =
+ readl_poll_timeout(priv->base + priv->dcfg->xCR[IMX_MU_GCR], val,
+ !(val & IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx)),
+ 0, 10000);
+
+ if (ret) {
+ dev_warn_ratelimited(priv->dev,
+ "channel type: %d timeout, %d times, retry\n",
+ cp->type, ++count);
+ }
+ }
break;
default:
dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
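The retry loop above bounds what used to be a single poll; its shape reduces to (demo_* names illustrative):

#include <linux/iopoll.h>

#define DEMO_MAX_RETRIES        10

static int demo_wait_bit_clear(void __iomem *reg, u32 mask)
{
        int ret = -ETIMEDOUT;
        int tries = 0;
        u32 val;

        /* Poll up to 10 ms per attempt, at most 10 attempts, instead of
         * warning once and carrying on after a single timeout. */
        while (ret && tries++ < DEMO_MAX_RETRIES)
                ret = readl_poll_timeout(reg, val, !(val & mask), 0, 10000);

        return ret;
}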
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 0593b4d03685..5cd8ae222073 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -6,6 +6,7 @@
* Author: Jassi Brar <jassisinghbrar@gmail.com>
*/
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -24,15 +25,12 @@ static DEFINE_MUTEX(con_mutex);
static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
{
int idx;
- unsigned long flags;
- spin_lock_irqsave(&chan->lock, flags);
+ guard(spinlock_irqsave)(&chan->lock);
/* See if there is any space left */
- if (chan->msg_count == MBOX_TX_QUEUE_LEN) {
- spin_unlock_irqrestore(&chan->lock, flags);
+ if (chan->msg_count == MBOX_TX_QUEUE_LEN)
return -ENOBUFS;
- }
idx = chan->msg_free;
chan->msg_data[idx] = mssg;
@@ -43,60 +41,53 @@ static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
else
chan->msg_free++;
- spin_unlock_irqrestore(&chan->lock, flags);
-
return idx;
}
static void msg_submit(struct mbox_chan *chan)
{
unsigned count, idx;
- unsigned long flags;
void *data;
int err = -EBUSY;
- spin_lock_irqsave(&chan->lock, flags);
-
- if (!chan->msg_count || chan->active_req)
- goto exit;
+ scoped_guard(spinlock_irqsave, &chan->lock) {
+ if (!chan->msg_count || chan->active_req)
+ break;
- count = chan->msg_count;
- idx = chan->msg_free;
- if (idx >= count)
- idx -= count;
- else
- idx += MBOX_TX_QUEUE_LEN - count;
+ count = chan->msg_count;
+ idx = chan->msg_free;
+ if (idx >= count)
+ idx -= count;
+ else
+ idx += MBOX_TX_QUEUE_LEN - count;
- data = chan->msg_data[idx];
+ data = chan->msg_data[idx];
- if (chan->cl->tx_prepare)
- chan->cl->tx_prepare(chan->cl, data);
- /* Try to submit a message to the MBOX controller */
- err = chan->mbox->ops->send_data(chan, data);
- if (!err) {
- chan->active_req = data;
- chan->msg_count--;
+ if (chan->cl->tx_prepare)
+ chan->cl->tx_prepare(chan->cl, data);
+ /* Try to submit a message to the MBOX controller */
+ err = chan->mbox->ops->send_data(chan, data);
+ if (!err) {
+ chan->active_req = data;
+ chan->msg_count--;
+ }
}
-exit:
- spin_unlock_irqrestore(&chan->lock, flags);
if (!err && (chan->txdone_method & TXDONE_BY_POLL)) {
/* kick start the timer immediately to avoid delays */
- spin_lock_irqsave(&chan->mbox->poll_hrt_lock, flags);
- hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
- spin_unlock_irqrestore(&chan->mbox->poll_hrt_lock, flags);
+ scoped_guard(spinlock_irqsave, &chan->mbox->poll_hrt_lock)
+ hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
}
}
static void tx_tick(struct mbox_chan *chan, int r)
{
- unsigned long flags;
void *mssg;
- spin_lock_irqsave(&chan->lock, flags);
- mssg = chan->active_req;
- chan->active_req = NULL;
- spin_unlock_irqrestore(&chan->lock, flags);
+ scoped_guard(spinlock_irqsave, &chan->lock) {
+ mssg = chan->active_req;
+ chan->active_req = NULL;
+ }
/* Submit next message */
msg_submit(chan);
@@ -118,7 +109,6 @@ static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
container_of(hrtimer, struct mbox_controller, poll_hrt);
bool txdone, resched = false;
int i;
- unsigned long flags;
for (i = 0; i < mbox->num_chans; i++) {
struct mbox_chan *chan = &mbox->chans[i];
@@ -133,10 +123,10 @@ static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
}
if (resched) {
- spin_lock_irqsave(&mbox->poll_hrt_lock, flags);
- if (!hrtimer_is_queued(hrtimer))
- hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period));
- spin_unlock_irqrestore(&mbox->poll_hrt_lock, flags);
+ scoped_guard(spinlock_irqsave, &mbox->poll_hrt_lock) {
+ if (!hrtimer_is_queued(hrtimer))
+ hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period));
+ }
return HRTIMER_RESTART;
}
@@ -318,25 +308,23 @@ EXPORT_SYMBOL_GPL(mbox_flush);
static int __mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
{
struct device *dev = cl->dev;
- unsigned long flags;
int ret;
if (chan->cl || !try_module_get(chan->mbox->dev->driver->owner)) {
- dev_dbg(dev, "%s: mailbox not free\n", __func__);
+ dev_err(dev, "%s: mailbox not free\n", __func__);
return -EBUSY;
}
- spin_lock_irqsave(&chan->lock, flags);
- chan->msg_free = 0;
- chan->msg_count = 0;
- chan->active_req = NULL;
- chan->cl = cl;
- init_completion(&chan->tx_complete);
-
- if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
- chan->txdone_method = TXDONE_BY_ACK;
+ scoped_guard(spinlock_irqsave, &chan->lock) {
+ chan->msg_free = 0;
+ chan->msg_count = 0;
+ chan->active_req = NULL;
+ chan->cl = cl;
+ init_completion(&chan->tx_complete);
- spin_unlock_irqrestore(&chan->lock, flags);
+ if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
+ chan->txdone_method = TXDONE_BY_ACK;
+ }
if (chan->mbox->ops->startup) {
ret = chan->mbox->ops->startup(chan);
@@ -370,13 +358,9 @@ static int __mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
*/
int mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
{
- int ret;
-
- mutex_lock(&con_mutex);
- ret = __mbox_bind_client(chan, cl);
- mutex_unlock(&con_mutex);
+ guard(mutex)(&con_mutex);
- return ret;
+ return __mbox_bind_client(chan, cl);
}
EXPORT_SYMBOL_GPL(mbox_bind_client);
@@ -413,32 +397,29 @@ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
ret = of_parse_phandle_with_args(dev->of_node, "mboxes", "#mbox-cells",
index, &spec);
if (ret) {
- dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__);
+ dev_err(dev, "%s: can't parse \"mboxes\" property\n", __func__);
return ERR_PTR(ret);
}
- mutex_lock(&con_mutex);
+ scoped_guard(mutex, &con_mutex) {
+ chan = ERR_PTR(-EPROBE_DEFER);
+ list_for_each_entry(mbox, &mbox_cons, node)
+ if (mbox->dev->of_node == spec.np) {
+ chan = mbox->of_xlate(mbox, &spec);
+ if (!IS_ERR(chan))
+ break;
+ }
- chan = ERR_PTR(-EPROBE_DEFER);
- list_for_each_entry(mbox, &mbox_cons, node)
- if (mbox->dev->of_node == spec.np) {
- chan = mbox->of_xlate(mbox, &spec);
- if (!IS_ERR(chan))
- break;
- }
+ of_node_put(spec.np);
- of_node_put(spec.np);
+ if (IS_ERR(chan))
+ return chan;
- if (IS_ERR(chan)) {
- mutex_unlock(&con_mutex);
- return chan;
+ ret = __mbox_bind_client(chan, cl);
+ if (ret)
+ chan = ERR_PTR(ret);
}
- ret = __mbox_bind_client(chan, cl);
- if (ret)
- chan = ERR_PTR(ret);
-
- mutex_unlock(&con_mutex);
return chan;
}
EXPORT_SYMBOL_GPL(mbox_request_channel);
@@ -458,7 +439,7 @@ struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
if (index < 0) {
dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
__func__, name);
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(index);
}
return mbox_request_channel(cl, index);
}
@@ -471,8 +452,6 @@ EXPORT_SYMBOL_GPL(mbox_request_channel_byname);
*/
void mbox_free_channel(struct mbox_chan *chan)
{
- unsigned long flags;
-
if (!chan || !chan->cl)
return;
@@ -480,14 +459,14 @@ void mbox_free_channel(struct mbox_chan *chan)
chan->mbox->ops->shutdown(chan);
/* The queued TX requests are simply aborted, no callbacks are made */
- spin_lock_irqsave(&chan->lock, flags);
- chan->cl = NULL;
- chan->active_req = NULL;
- if (chan->txdone_method == TXDONE_BY_ACK)
- chan->txdone_method = TXDONE_BY_POLL;
+ scoped_guard(spinlock_irqsave, &chan->lock) {
+ chan->cl = NULL;
+ chan->active_req = NULL;
+ if (chan->txdone_method == TXDONE_BY_ACK)
+ chan->txdone_method = TXDONE_BY_POLL;
+ }
module_put(chan->mbox->dev->driver->owner);
- spin_unlock_irqrestore(&chan->lock, flags);
}
EXPORT_SYMBOL_GPL(mbox_free_channel);
@@ -547,9 +526,8 @@ int mbox_controller_register(struct mbox_controller *mbox)
if (!mbox->of_xlate)
mbox->of_xlate = of_mbox_index_xlate;
- mutex_lock(&con_mutex);
- list_add_tail(&mbox->node, &mbox_cons);
- mutex_unlock(&con_mutex);
+ scoped_guard(mutex, &con_mutex)
+ list_add_tail(&mbox->node, &mbox_cons);
return 0;
}
@@ -566,17 +544,15 @@ void mbox_controller_unregister(struct mbox_controller *mbox)
if (!mbox)
return;
- mutex_lock(&con_mutex);
-
- list_del(&mbox->node);
-
- for (i = 0; i < mbox->num_chans; i++)
- mbox_free_channel(&mbox->chans[i]);
+ scoped_guard(mutex, &con_mutex) {
+ list_del(&mbox->node);
- if (mbox->txdone_poll)
- hrtimer_cancel(&mbox->poll_hrt);
+ for (i = 0; i < mbox->num_chans; i++)
+ mbox_free_channel(&mbox->chans[i]);
- mutex_unlock(&con_mutex);
+ if (mbox->txdone_poll)
+ hrtimer_cancel(&mbox->poll_hrt);
+ }
}
EXPORT_SYMBOL_GPL(mbox_controller_unregister);
@@ -587,16 +563,6 @@ static void __devm_mbox_controller_unregister(struct device *dev, void *res)
mbox_controller_unregister(*mbox);
}
-static int devm_mbox_controller_match(struct device *dev, void *res, void *data)
-{
- struct mbox_controller **mbox = res;
-
- if (WARN_ON(!mbox || !*mbox))
- return 0;
-
- return *mbox == data;
-}
-
/**
* devm_mbox_controller_register() - managed mbox_controller_register()
* @dev: device owning the mailbox controller being registered
@@ -632,20 +598,3 @@ int devm_mbox_controller_register(struct device *dev,
return 0;
}
EXPORT_SYMBOL_GPL(devm_mbox_controller_register);
-
-/**
- * devm_mbox_controller_unregister() - managed mbox_controller_unregister()
- * @dev: device owning the mailbox controller being unregistered
- * @mbox: mailbox controller being unregistered
- *
- * This function unregisters the mailbox controller and removes the device-
- * managed resource that was set up to automatically unregister the mailbox
- * controller on driver probe failure or driver removal. It's typically not
- * necessary to call this function.
- */
-void devm_mbox_controller_unregister(struct device *dev, struct mbox_controller *mbox)
-{
- WARN_ON(devres_release(dev, __devm_mbox_controller_unregister,
- devm_mbox_controller_match, mbox));
-}
-EXPORT_SYMBOL_GPL(devm_mbox_controller_unregister);
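The mailbox core conversion above is the standard cleanup.h guard pattern: the lock is released at every return path, or at the closing brace of the scoped block. Reduced to a self-contained sketch (demo_* names illustrative):

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

static DEFINE_MUTEX(demo_mutex);
static DEFINE_SPINLOCK(demo_lock);

static int demo_guarded(int *shared)
{
        /* Unlocked automatically on every return below. */
        guard(mutex)(&demo_mutex);

        if (!*shared)
                return -ENODEV;         /* no explicit mutex_unlock() */

        /* IRQ state is restored at the closing brace, replacing the
         * spin_lock_irqsave()/spin_unlock_irqrestore() pair. */
        scoped_guard(spinlock_irqsave, &demo_lock)
                (*shared)--;

        return 0;
}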
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index d186865b8dce..ab4e8d1954a1 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -92,18 +92,6 @@ struct gce_plat {
u32 gce_num;
};
-static void cmdq_sw_ddr_enable(struct cmdq *cmdq, bool enable)
-{
- WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
-
- if (enable)
- writel(GCE_DDR_EN | GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);
- else
- writel(GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);
-
- clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
-}
-
u8 cmdq_get_shift_pa(struct mbox_chan *chan)
{
struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);
@@ -112,6 +100,19 @@ u8 cmdq_get_shift_pa(struct mbox_chan *chan)
}
EXPORT_SYMBOL(cmdq_get_shift_pa);
+static void cmdq_gctl_value_toggle(struct cmdq *cmdq, bool ddr_enable)
+{
+ u32 val = cmdq->pdata->control_by_sw ? GCE_CTRL_BY_SW : 0;
+
+ if (!cmdq->pdata->control_by_sw && !cmdq->pdata->sw_ddr_en)
+ return;
+
+ if (cmdq->pdata->sw_ddr_en && ddr_enable)
+ val |= GCE_DDR_EN;
+
+ writel(val, cmdq->base + GCE_GCTL_VALUE);
+}
+
static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread)
{
u32 status;
@@ -140,16 +141,10 @@ static void cmdq_thread_resume(struct cmdq_thread *thread)
static void cmdq_init(struct cmdq *cmdq)
{
int i;
- u32 gctl_regval = 0;
WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
- if (cmdq->pdata->control_by_sw)
- gctl_regval = GCE_CTRL_BY_SW;
- if (cmdq->pdata->sw_ddr_en)
- gctl_regval |= GCE_DDR_EN;
- if (gctl_regval)
- writel(gctl_regval, cmdq->base + GCE_GCTL_VALUE);
+ cmdq_gctl_value_toggle(cmdq, true);
writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
for (i = 0; i <= CMDQ_MAX_EVENT; i++)
@@ -315,14 +310,21 @@ static irqreturn_t cmdq_irq_handler(int irq, void *dev)
static int cmdq_runtime_resume(struct device *dev)
{
struct cmdq *cmdq = dev_get_drvdata(dev);
+ int ret;
- return clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks);
+ ret = clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks);
+ if (ret)
+ return ret;
+
+ cmdq_gctl_value_toggle(cmdq, true);
+ return 0;
}
static int cmdq_runtime_suspend(struct device *dev)
{
struct cmdq *cmdq = dev_get_drvdata(dev);
+ cmdq_gctl_value_toggle(cmdq, false);
clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
return 0;
}
@@ -347,9 +349,6 @@ static int cmdq_suspend(struct device *dev)
if (task_running)
dev_warn(dev, "exist running task(s) in suspend\n");
- if (cmdq->pdata->sw_ddr_en)
- cmdq_sw_ddr_enable(cmdq, false);
-
return pm_runtime_force_suspend(dev);
}
@@ -360,9 +359,6 @@ static int cmdq_resume(struct device *dev)
WARN_ON(pm_runtime_force_resume(dev));
cmdq->suspended = false;
- if (cmdq->pdata->sw_ddr_en)
- cmdq_sw_ddr_enable(cmdq, true);
-
return 0;
}
@@ -370,9 +366,6 @@ static void cmdq_remove(struct platform_device *pdev)
{
struct cmdq *cmdq = platform_get_drvdata(pdev);
- if (cmdq->pdata->sw_ddr_en)
- cmdq_sw_ddr_enable(cmdq, false);
-
if (!IS_ENABLED(CONFIG_PM))
cmdq_runtime_suspend(&pdev->dev);
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
index 11c41e935a36..8b24ec0fa191 100644
--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -116,10 +116,18 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
}
if (apcs_data->clk_name) {
- apcs->clk = platform_device_register_data(&pdev->dev,
- apcs_data->clk_name,
- PLATFORM_DEVID_AUTO,
- NULL, 0);
+ struct device_node *np = of_get_child_by_name(pdev->dev.of_node,
+ "clock-controller");
+ struct platform_device_info pdevinfo = {
+ .parent = &pdev->dev,
+ .name = apcs_data->clk_name,
+ .id = PLATFORM_DEVID_AUTO,
+ .fwnode = of_fwnode_handle(np) ?: pdev->dev.fwnode,
+ .of_node_reused = !np,
+ };
+
+ apcs->clk = platform_device_register_full(&pdevinfo);
+ of_node_put(np);
if (IS_ERR(apcs->clk))
dev_err(&pdev->dev, "failed to register APCS clk\n");
}
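The qcom-apcs change swaps platform_device_register_data() for the _full() variant so the child clock device can be handed an optional OF subnode. The pattern in isolation (demo names illustrative):

#include <linux/of.h>
#include <linux/platform_device.h>

static struct platform_device *demo_register_clk_child(struct platform_device *pdev)
{
        struct device_node *np = of_get_child_by_name(pdev->dev.of_node,
                                                      "clock-controller");
        struct platform_device_info info = {
                .parent = &pdev->dev,
                .name = "demo-clk",             /* hypothetical child driver */
                .id = PLATFORM_DEVID_AUTO,
                /* Bind to the subnode when present, else reuse the parent. */
                .fwnode = of_fwnode_handle(np) ?: pdev->dev.fwnode,
                .of_node_reused = !np,
        };
        struct platform_device *child = platform_device_register_full(&info);

        of_node_put(np);
        return child;
}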
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index ed40d8600656..2cc2eb24dc8a 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -36,6 +36,7 @@
#include <linux/sched/clock.h>
#include <linux/rculist.h>
#include <linux/delay.h>
+#include <linux/sort.h>
#include <trace/events/bcache.h>
/*
@@ -559,8 +560,6 @@ static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
}
}
-#define cmp_int(l, r) ((l > r) - (l < r))
-
#ifdef CONFIG_PROVE_LOCKING
static int btree_lock_cmp_fn(const struct lockdep_map *_a,
const struct lockdep_map *_b)
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index d098e75e3461..ec84ba5e93e5 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -41,16 +41,6 @@
#define DM_BUFIO_LOW_WATERMARK_RATIO 16
/*
- * Check buffer ages in this interval (seconds)
- */
-#define DM_BUFIO_WORK_TIMER_SECS 30
-
-/*
- * Free buffers when they are older than this (seconds)
- */
-#define DM_BUFIO_DEFAULT_AGE_SECS 300
-
-/*
* The nr of bytes of cached data to keep around.
*/
#define DM_BUFIO_DEFAULT_RETAIN_BYTES (256 * 1024)
@@ -1057,10 +1047,8 @@ static unsigned long dm_bufio_cache_size_latch;
static DEFINE_SPINLOCK(global_spinlock);
-/*
- * Buffers are freed after this timeout
- */
-static unsigned int dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
+static unsigned int dm_bufio_max_age; /* No longer does anything */
+
static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
static unsigned long dm_bufio_peak_allocated;
@@ -1088,7 +1076,6 @@ static LIST_HEAD(dm_bufio_all_clients);
static DEFINE_MUTEX(dm_bufio_clients_lock);
static struct workqueue_struct *dm_bufio_wq;
-static struct delayed_work dm_bufio_cleanup_old_work;
static struct work_struct dm_bufio_replacement_work;
@@ -2680,130 +2667,6 @@ EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
/*--------------------------------------------------------------*/
-static unsigned int get_max_age_hz(void)
-{
- unsigned int max_age = READ_ONCE(dm_bufio_max_age);
-
- if (max_age > UINT_MAX / HZ)
- max_age = UINT_MAX / HZ;
-
- return max_age * HZ;
-}
-
-static bool older_than(struct dm_buffer *b, unsigned long age_hz)
-{
- return time_after_eq(jiffies, READ_ONCE(b->last_accessed) + age_hz);
-}
-
-struct evict_params {
- gfp_t gfp;
- unsigned long age_hz;
-
- /*
- * This gets updated with the largest last_accessed (ie. most
- * recently used) of the evicted buffers. It will not be reinitialised
- * by __evict_many(), so you can use it across multiple invocations.
- */
- unsigned long last_accessed;
-};
-
-/*
- * We may not be able to evict this buffer if IO pending or the client
- * is still using it.
- *
- * And if GFP_NOFS is used, we must not do any I/O because we hold
- * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
- * rerouted to different bufio client.
- */
-static enum evict_result select_for_evict(struct dm_buffer *b, void *context)
-{
- struct evict_params *params = context;
-
- if (!(params->gfp & __GFP_FS) ||
- (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) {
- if (test_bit_acquire(B_READING, &b->state) ||
- test_bit(B_WRITING, &b->state) ||
- test_bit(B_DIRTY, &b->state))
- return ER_DONT_EVICT;
- }
-
- return older_than(b, params->age_hz) ? ER_EVICT : ER_STOP;
-}
-
-static unsigned long __evict_many(struct dm_bufio_client *c,
- struct evict_params *params,
- int list_mode, unsigned long max_count)
-{
- unsigned long count;
- unsigned long last_accessed;
- struct dm_buffer *b;
-
- for (count = 0; count < max_count; count++) {
- b = cache_evict(&c->cache, list_mode, select_for_evict, params);
- if (!b)
- break;
-
- last_accessed = READ_ONCE(b->last_accessed);
- if (time_after_eq(params->last_accessed, last_accessed))
- params->last_accessed = last_accessed;
-
- __make_buffer_clean(b);
- __free_buffer_wake(b);
-
- cond_resched();
- }
-
- return count;
-}
-
-static void evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
-{
- struct evict_params params = {.gfp = 0, .age_hz = age_hz, .last_accessed = 0};
- unsigned long retain = get_retain_buffers(c);
- unsigned long count;
- LIST_HEAD(write_list);
-
- dm_bufio_lock(c);
-
- __check_watermark(c, &write_list);
- if (unlikely(!list_empty(&write_list))) {
- dm_bufio_unlock(c);
- __flush_write_list(&write_list);
- dm_bufio_lock(c);
- }
-
- count = cache_total(&c->cache);
- if (count > retain)
- __evict_many(c, &params, LIST_CLEAN, count - retain);
-
- dm_bufio_unlock(c);
-}
-
-static void cleanup_old_buffers(void)
-{
- unsigned long max_age_hz = get_max_age_hz();
- struct dm_bufio_client *c;
-
- mutex_lock(&dm_bufio_clients_lock);
-
- __cache_size_refresh();
-
- list_for_each_entry(c, &dm_bufio_all_clients, client_list)
- evict_old_buffers(c, max_age_hz);
-
- mutex_unlock(&dm_bufio_clients_lock);
-}
-
-static void work_fn(struct work_struct *w)
-{
- cleanup_old_buffers();
-
- queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
- DM_BUFIO_WORK_TIMER_SECS * HZ);
-}
-
-/*--------------------------------------------------------------*/
-
/*
* Global cleanup tries to evict the oldest buffers from across _all_
* the clients. It does this by repeatedly evicting a few buffers from
@@ -2841,27 +2704,51 @@ static void __insert_client(struct dm_bufio_client *new_client)
list_add_tail(&new_client->client_list, h);
}
+static enum evict_result select_for_evict(struct dm_buffer *b, void *context)
+{
+ /* In no-sleep mode, we cannot wait on IO. */
+ if (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep) {
+ if (test_bit_acquire(B_READING, &b->state) ||
+ test_bit(B_WRITING, &b->state) ||
+ test_bit(B_DIRTY, &b->state))
+ return ER_DONT_EVICT;
+ }
+ return ER_EVICT;
+}
+
static unsigned long __evict_a_few(unsigned long nr_buffers)
{
- unsigned long count;
struct dm_bufio_client *c;
- struct evict_params params = {
- .gfp = GFP_KERNEL,
- .age_hz = 0,
- /* set to jiffies in case there are no buffers in this client */
- .last_accessed = jiffies
- };
+ unsigned long oldest_buffer = jiffies;
+ unsigned long last_accessed;
+ unsigned long count;
+ struct dm_buffer *b;
c = __pop_client();
if (!c)
return 0;
dm_bufio_lock(c);
- count = __evict_many(c, &params, LIST_CLEAN, nr_buffers);
+
+ for (count = 0; count < nr_buffers; count++) {
+ b = cache_evict(&c->cache, LIST_CLEAN, select_for_evict, NULL);
+ if (!b)
+ break;
+
+ last_accessed = READ_ONCE(b->last_accessed);
+ if (time_after_eq(oldest_buffer, last_accessed))
+ oldest_buffer = last_accessed;
+
+ __make_buffer_clean(b);
+ __free_buffer_wake(b);
+
+ cond_resched();
+ }
+
dm_bufio_unlock(c);
if (count)
- c->oldest_buffer = params.last_accessed;
+ c->oldest_buffer = oldest_buffer;
__insert_client(c);
return count;
@@ -2944,10 +2831,7 @@ static int __init dm_bufio_init(void)
if (!dm_bufio_wq)
return -ENOMEM;
- INIT_DELAYED_WORK(&dm_bufio_cleanup_old_work, work_fn);
INIT_WORK(&dm_bufio_replacement_work, do_global_cleanup);
- queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
- DM_BUFIO_WORK_TIMER_SECS * HZ);
return 0;
}
@@ -2959,7 +2843,6 @@ static void __exit dm_bufio_exit(void)
{
int bug = 0;
- cancel_delayed_work_sync(&dm_bufio_cleanup_old_work);
destroy_workqueue(dm_bufio_wq);
if (dm_bufio_client_count) {
@@ -2996,7 +2879,7 @@ module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, 0644);
MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
module_param_named(max_age_seconds, dm_bufio_max_age, uint, 0644);
-MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
+MODULE_PARM_DESC(max_age_seconds, "No longer does anything");
module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, 0644);
MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 3637761f3585..c889332e533b 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -141,6 +141,7 @@ struct mapped_device {
#ifdef CONFIG_BLK_DEV_ZONED
unsigned int nr_zones;
void *zone_revalidate_map;
+ struct task_struct *revalidate_map_task;
#endif
#ifdef CONFIG_IMA
@@ -162,9 +163,6 @@ struct mapped_device {
#define DMF_POST_SUSPENDING 8
#define DMF_EMULATE_ZONE_APPEND 9
-void disable_discard(struct mapped_device *md);
-void disable_write_zeroes(struct mapped_device *md);
-
static inline sector_t dm_get_size(struct mapped_device *md)
{
return get_capacity(md->disk);
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index d4cf0ac2a7aa..16d3d454fb0a 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -14,11 +14,14 @@
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/kthread.h>
+#include <linux/delay.h>
#include <linux/device-mapper.h>
#define DM_MSG_PREFIX "delay"
+#define SLEEP_SHIFT 3
+
struct delay_class {
struct dm_dev *dev;
sector_t start;
@@ -34,6 +37,7 @@ struct delay_c {
struct work_struct flush_expired_bios;
struct list_head delayed_bios;
struct task_struct *worker;
+ unsigned int worker_sleep_us;
bool may_delay;
struct delay_class read;
@@ -136,6 +140,7 @@ static int flush_worker_fn(void *data)
schedule();
} else {
spin_unlock(&dc->delayed_bios_lock);
+ fsleep(dc->worker_sleep_us);
cond_resched();
}
}
@@ -212,7 +217,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct delay_c *dc;
int ret;
- unsigned int max_delay;
+ unsigned int max_delay, min_delay;
if (argc != 3 && argc != 6 && argc != 9) {
ti->error = "Requires exactly 3, 6 or 9 arguments";
@@ -235,7 +240,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ret = delay_class_ctr(ti, &dc->read, argv);
if (ret)
goto bad;
- max_delay = dc->read.delay;
+ min_delay = max_delay = dc->read.delay;
if (argc == 3) {
ret = delay_class_ctr(ti, &dc->write, argv);
@@ -251,6 +256,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (ret)
goto bad;
max_delay = max(max_delay, dc->write.delay);
+ min_delay = min_not_zero(min_delay, dc->write.delay);
if (argc == 6) {
ret = delay_class_ctr(ti, &dc->flush, argv + 3);
@@ -263,9 +269,14 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (ret)
goto bad;
max_delay = max(max_delay, dc->flush.delay);
+ min_delay = min_not_zero(min_delay, dc->flush.delay);
out:
if (max_delay < 50) {
+ if (min_delay >> SLEEP_SHIFT)
+ dc->worker_sleep_us = 1000;
+ else
+ dc->worker_sleep_us = (min_delay * 1000) >> SLEEP_SHIFT;
/*
* In case of small requested delays, use kthread instead of
* timers and workqueue to achieve better latency.
@@ -438,7 +449,7 @@ out:
static struct target_type delay_target = {
.name = "delay",
- .version = {1, 4, 0},
+ .version = {1, 5, 0},
.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM,
.module = THIS_MODULE,
.ctr = delay_ctr,
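The worker_sleep_us computation above amounts to sleeping one eighth of the smallest configured delay, capped at 1 ms (SLEEP_SHIFT is 3). Worked through:

static unsigned int demo_worker_sleep_us(unsigned int min_delay_ms)
{
        if (min_delay_ms >> 3)                  /* >= 8 ms: cap at 1 ms */
                return 1000;
        return (min_delay_ms * 1000) >> 3;      /* delay / 8, in us */
}

/* e.g. min_delay = 2 ms -> 250 us; min_delay = 20 ms -> capped at 1000 us */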
diff --git a/drivers/md/dm-dust.c b/drivers/md/dm-dust.c
index 1a33820c9f46..e75310232bbf 100644
--- a/drivers/md/dm-dust.c
+++ b/drivers/md/dm-dust.c
@@ -534,7 +534,9 @@ static void dust_status(struct dm_target *ti, status_type_t type,
}
}
-static int dust_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
+static int dust_prepare_ioctl(struct dm_target *ti, struct block_device **bdev,
+ unsigned int cmd, unsigned long arg,
+ bool *forward)
{
struct dust_device *dd = ti->private;
struct dm_dev *dev = dd->dev;
diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c
index b19b0142a690..6abb31ca9662 100644
--- a/drivers/md/dm-ebs-target.c
+++ b/drivers/md/dm-ebs-target.c
@@ -415,7 +415,8 @@ static void ebs_status(struct dm_target *ti, status_type_t type,
}
}
-static int ebs_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
+static int ebs_prepare_ioctl(struct dm_target *ti, struct block_device **bdev,
+ unsigned int cmd, unsigned long arg, bool *forward)
{
struct ebs_c *ec = ti->private;
struct dm_dev *dev = ec->dev;
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index b690905ab89f..c711db6f8f5c 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -47,14 +47,15 @@ enum feature_flag_bits {
};
struct per_bio_data {
- bool bio_submitted;
+ bool bio_can_corrupt;
+ struct bvec_iter saved_iter;
};
static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
struct dm_target *ti)
{
- int r;
- unsigned int argc;
+ int r = 0;
+ unsigned int argc = 0;
const char *arg_name;
static const struct dm_arg _args[] = {
@@ -65,14 +66,13 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
{0, PROBABILITY_BASE, "Invalid random corrupt argument"},
};
- /* No feature arguments supplied. */
- if (!as->argc)
- return 0;
-
- r = dm_read_arg_group(_args, as, &argc, &ti->error);
- if (r)
+ if (as->argc && (r = dm_read_arg_group(_args, as, &argc, &ti->error)))
return r;
+ /* No feature arguments supplied. */
+ if (!argc)
+ goto error_all_io;
+
while (argc) {
arg_name = dm_shift_arg(as);
argc--;
@@ -128,8 +128,11 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
* corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>
*/
if (!strcasecmp(arg_name, "corrupt_bio_byte")) {
- if (!argc) {
- ti->error = "Feature corrupt_bio_byte requires parameters";
+ if (fc->corrupt_bio_byte) {
+ ti->error = "Feature corrupt_bio_byte duplicated";
+ return -EINVAL;
+ } else if (argc < 4) {
+ ti->error = "Feature corrupt_bio_byte requires 4 parameters";
return -EINVAL;
}
@@ -176,7 +179,10 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
}
if (!strcasecmp(arg_name, "random_read_corrupt")) {
- if (!argc) {
+ if (fc->random_read_corrupt) {
+ ti->error = "Feature random_read_corrupt duplicated";
+ return -EINVAL;
+ } else if (!argc) {
ti->error = "Feature random_read_corrupt requires a parameter";
return -EINVAL;
}
@@ -189,7 +195,10 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
}
if (!strcasecmp(arg_name, "random_write_corrupt")) {
- if (!argc) {
+ if (fc->random_write_corrupt) {
+ ti->error = "Feature random_write_corrupt duplicated";
+ return -EINVAL;
+ } else if (!argc) {
ti->error = "Feature random_write_corrupt requires a parameter";
return -EINVAL;
}
@@ -205,18 +214,25 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
return -EINVAL;
}
- if (test_bit(DROP_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
- ti->error = "drop_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
+ if (test_bit(DROP_WRITES, &fc->flags) &&
+ (fc->corrupt_bio_rw == WRITE || fc->random_write_corrupt)) {
+ ti->error = "drop_writes is incompatible with random_write_corrupt or corrupt_bio_byte with the WRITE flag set";
return -EINVAL;
- } else if (test_bit(ERROR_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
- ti->error = "error_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
+ } else if (test_bit(ERROR_WRITES, &fc->flags) &&
+ (fc->corrupt_bio_rw == WRITE || fc->random_write_corrupt)) {
+ ti->error = "error_writes is incompatible with random_write_corrupt or corrupt_bio_byte with the WRITE flag set";
+ return -EINVAL;
+ } else if (test_bit(ERROR_READS, &fc->flags) &&
+ (fc->corrupt_bio_rw == READ || fc->random_read_corrupt)) {
+ ti->error = "error_reads is incompatible with random_read_corrupt or corrupt_bio_byte with the READ flag set";
return -EINVAL;
}
if (!fc->corrupt_bio_byte && !test_bit(ERROR_READS, &fc->flags) &&
!test_bit(DROP_WRITES, &fc->flags) && !test_bit(ERROR_WRITES, &fc->flags) &&
!fc->random_read_corrupt && !fc->random_write_corrupt) {
+error_all_io:
set_bit(ERROR_WRITES, &fc->flags);
set_bit(ERROR_READS, &fc->flags);
}
@@ -278,7 +294,7 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (r)
goto bad;
- r = dm_read_arg(_args, &as, &fc->down_interval, &ti->error);
+ r = dm_read_arg(_args + 1, &as, &fc->down_interval, &ti->error);
if (r)
goto bad;
@@ -339,7 +355,8 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
}
static void corrupt_bio_common(struct bio *bio, unsigned int corrupt_bio_byte,
- unsigned char corrupt_bio_value)
+ unsigned char corrupt_bio_value,
+ struct bvec_iter start)
{
struct bvec_iter iter;
struct bio_vec bvec;
@@ -348,7 +365,7 @@ static void corrupt_bio_common(struct bio *bio, unsigned int corrupt_bio_byte,
* Overwrite the Nth byte of the bio's data, on whichever page
* it falls.
*/
- bio_for_each_segment(bvec, bio, iter) {
+ __bio_for_each_segment(bvec, bio, iter, start) {
if (bio_iter_len(bio, iter) > corrupt_bio_byte) {
unsigned char *segment = bvec_kmap_local(&bvec);
segment[corrupt_bio_byte] = corrupt_bio_value;
@@ -357,36 +374,31 @@ static void corrupt_bio_common(struct bio *bio, unsigned int corrupt_bio_byte,
"(rw=%c bi_opf=%u bi_sector=%llu size=%u)\n",
bio, corrupt_bio_value, corrupt_bio_byte,
(bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,
- (unsigned long long)bio->bi_iter.bi_sector,
- bio->bi_iter.bi_size);
+ (unsigned long long)start.bi_sector,
+ start.bi_size);
break;
}
corrupt_bio_byte -= bio_iter_len(bio, iter);
}
}
-static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
+static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc,
+ struct bvec_iter start)
{
unsigned int corrupt_bio_byte = fc->corrupt_bio_byte - 1;
- if (!bio_has_data(bio))
- return;
-
- corrupt_bio_common(bio, corrupt_bio_byte, fc->corrupt_bio_value);
+ corrupt_bio_common(bio, corrupt_bio_byte, fc->corrupt_bio_value, start);
}
-static void corrupt_bio_random(struct bio *bio)
+static void corrupt_bio_random(struct bio *bio, struct bvec_iter start)
{
unsigned int corrupt_byte;
unsigned char corrupt_value;
- if (!bio_has_data(bio))
- return;
-
- corrupt_byte = get_random_u32() % bio->bi_iter.bi_size;
+ corrupt_byte = get_random_u32() % start.bi_size;
corrupt_value = get_random_u8();
- corrupt_bio_common(bio, corrupt_byte, corrupt_value);
+ corrupt_bio_common(bio, corrupt_byte, corrupt_value, start);
}
static void clone_free(struct bio *clone)
@@ -481,7 +493,7 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
unsigned int elapsed;
struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
- pb->bio_submitted = false;
+ pb->bio_can_corrupt = false;
if (op_is_zone_mgmt(bio_op(bio)))
goto map_bio;
@@ -490,14 +502,15 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
elapsed = (jiffies - fc->start_time) / HZ;
if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) {
bool corrupt_fixed, corrupt_random;
- /*
- * Flag this bio as submitted while down.
- */
- pb->bio_submitted = true;
+
+ if (bio_has_data(bio)) {
+ pb->bio_can_corrupt = true;
+ pb->saved_iter = bio->bi_iter;
+ }
/*
- * Error reads if neither corrupt_bio_byte or drop_writes or error_writes are set.
- * Otherwise, flakey_end_io() will decide if the reads should be modified.
+ * If ERROR_READS isn't set flakey_end_io() will decide if the
+ * reads should be modified.
*/
if (bio_data_dir(bio) == READ) {
if (test_bit(ERROR_READS, &fc->flags))
@@ -516,6 +529,8 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_SUBMITTED;
}
+ if (!pb->bio_can_corrupt)
+ goto map_bio;
/*
* Corrupt matching writes.
*/
@@ -535,9 +550,11 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
struct bio *clone = clone_bio(ti, fc, bio);
if (clone) {
if (corrupt_fixed)
- corrupt_bio_data(clone, fc);
+ corrupt_bio_data(clone, fc,
+ clone->bi_iter);
if (corrupt_random)
- corrupt_bio_random(clone);
+ corrupt_bio_random(clone,
+ clone->bi_iter);
submit_bio(clone);
return DM_MAPIO_SUBMITTED;
}
@@ -559,28 +576,21 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio,
if (op_is_zone_mgmt(bio_op(bio)))
return DM_ENDIO_DONE;
- if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
+ if (!*error && pb->bio_can_corrupt && (bio_data_dir(bio) == READ)) {
if (fc->corrupt_bio_byte) {
if ((fc->corrupt_bio_rw == READ) &&
all_corrupt_bio_flags_match(bio, fc)) {
/*
* Corrupt successful matching READs while in down state.
*/
- corrupt_bio_data(bio, fc);
+ corrupt_bio_data(bio, fc, pb->saved_iter);
}
}
if (fc->random_read_corrupt) {
u64 rnd = get_random_u64();
u32 rem = do_div(rnd, PROBABILITY_BASE);
if (rem < fc->random_read_corrupt)
- corrupt_bio_random(bio);
- }
- if (test_bit(ERROR_READS, &fc->flags)) {
- /*
- * Error read during the down_interval if drop_writes
- * and error_writes were not configured.
- */
- *error = BLK_STS_IOERR;
+ corrupt_bio_random(bio, pb->saved_iter);
}
}
@@ -638,7 +648,9 @@ static void flakey_status(struct dm_target *ti, status_type_t type,
}
}
-static int flakey_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
+static int flakey_prepare_ioctl(struct dm_target *ti, struct block_device **bdev,
+ unsigned int cmd, unsigned long arg,
+ bool *forward)
{
struct flakey_c *fc = ti->private;
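The saved_iter plumbing above exists because bio->bi_iter is consumed as the bio completes, so end-io corruption must replay from a snapshot taken at map time. The shape of the fix in isolation (demo_* names illustrative):

#include <linux/bio.h>

struct demo_pb {
        bool can_corrupt;
        struct bvec_iter saved_iter;
};

static void demo_map(struct bio *bio, struct demo_pb *pb)
{
        /* Snapshot the iterator while it still covers the payload. */
        if (bio_has_data(bio)) {
                pb->can_corrupt = true;
                pb->saved_iter = bio->bi_iter;
        }
}

static void demo_end_io(struct bio *bio, struct demo_pb *pb)
{
        struct bvec_iter iter;
        struct bio_vec bv;

        if (!pb->can_corrupt)
                return;

        /* Walk from the snapshot, not the now-advanced live iterator. */
        __bio_for_each_segment(bv, bio, iter, pb->saved_iter) {
                /* bv.bv_page / bv.bv_offset address the original data */
        }
}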
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index d42eac944eb5..4165fef4c170 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1885,6 +1885,7 @@ static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags)
{DM_DEV_SET_GEOMETRY_CMD, 0, dev_set_geometry},
{DM_DEV_ARM_POLL_CMD, IOCTL_FLAGS_NO_PARAMS, dev_arm_poll},
{DM_GET_TARGET_VERSION_CMD, 0, get_target_version},
+ {DM_MPATH_PROBE_PATHS_CMD, 0, NULL}, /* block device ioctl */
};
if (unlikely(cmd >= ARRAY_SIZE(_ioctls)))
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 66318aba4bdb..15538ec58f8e 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -119,7 +119,9 @@ static void linear_status(struct dm_target *ti, status_type_t type,
}
}
-static int linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
+static int linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev,
+ unsigned int cmd, unsigned long arg,
+ bool *forward)
{
struct linear_c *lc = ti->private;
struct dm_dev *dev = lc->dev;
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 8d7df8303d0a..d484e8e1d48a 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -818,7 +818,9 @@ static void log_writes_status(struct dm_target *ti, status_type_t type,
}
static int log_writes_prepare_ioctl(struct dm_target *ti,
- struct block_device **bdev)
+ struct block_device **bdev,
+ unsigned int cmd, unsigned long arg,
+ bool *forward)
{
struct log_writes_c *lc = ti->private;
struct dm_dev *dev = lc->dev;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 6c98f4ae5ea9..81fec2e1e0ef 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -79,6 +79,7 @@ struct multipath {
struct pgpath *current_pgpath;
struct priority_group *current_pg;
struct priority_group *next_pg; /* Switch to this PG if set */
+ struct priority_group *last_probed_pg;
atomic_t nr_valid_paths; /* Total number of usable paths */
unsigned int nr_priority_groups;
@@ -87,6 +88,7 @@ struct multipath {
const char *hw_handler_name;
char *hw_handler_params;
wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
+ wait_queue_head_t probe_wait; /* Wait for probing paths */
unsigned int pg_init_retries; /* Number of times to retry pg_init */
unsigned int pg_init_delay_msecs; /* Number of msecs before pg_init retry */
atomic_t pg_init_in_progress; /* Only one pg_init allowed at once */
@@ -100,6 +102,7 @@ struct multipath {
struct bio_list queued_bios;
struct timer_list nopath_timer; /* Timeout for queue_if_no_path */
+ bool is_suspending;
};
/*
@@ -132,6 +135,8 @@ static void queue_if_no_path_timeout_work(struct timer_list *t);
#define MPATHF_PG_INIT_DISABLED 4 /* pg_init is not currently allowed */
#define MPATHF_PG_INIT_REQUIRED 5 /* pg_init needs calling? */
#define MPATHF_PG_INIT_DELAY_RETRY 6 /* Delay pg_init retry? */
+#define MPATHF_DELAY_PG_SWITCH 7 /* Delay switching pg if it still has paths */
+#define MPATHF_NEED_PG_SWITCH 8 /* Need to switch pgs after the delay has ended */
static bool mpath_double_check_test_bit(int MPATHF_bit, struct multipath *m)
{
@@ -254,6 +259,7 @@ static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
atomic_set(&m->pg_init_count, 0);
m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
init_waitqueue_head(&m->pg_init_wait);
+ init_waitqueue_head(&m->probe_wait);
return 0;
}
@@ -413,13 +419,21 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
goto failed;
}
+ /* Don't change PG until it has no remaining paths */
+ pg = READ_ONCE(m->current_pg);
+ if (pg) {
+ pgpath = choose_path_in_pg(m, pg, nr_bytes);
+ if (!IS_ERR_OR_NULL(pgpath))
+ return pgpath;
+ }
+
/* Were we instructed to switch PG? */
if (READ_ONCE(m->next_pg)) {
spin_lock_irqsave(&m->lock, flags);
pg = m->next_pg;
if (!pg) {
spin_unlock_irqrestore(&m->lock, flags);
- goto check_current_pg;
+ goto check_all_pgs;
}
m->next_pg = NULL;
spin_unlock_irqrestore(&m->lock, flags);
@@ -427,16 +441,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
if (!IS_ERR_OR_NULL(pgpath))
return pgpath;
}
-
- /* Don't change PG until it has no remaining paths */
-check_current_pg:
- pg = READ_ONCE(m->current_pg);
- if (pg) {
- pgpath = choose_path_in_pg(m, pg, nr_bytes);
- if (!IS_ERR_OR_NULL(pgpath))
- return pgpath;
- }
-
+check_all_pgs:
/*
* Loop through priority groups until we find a valid path.
* First time we skip PGs marked 'bypassed'.
@@ -612,7 +617,6 @@ static void multipath_queue_bio(struct multipath *m, struct bio *bio)
static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
{
struct pgpath *pgpath;
- unsigned long flags;
/* Do we need to select a new pgpath? */
pgpath = READ_ONCE(m->current_pgpath);
@@ -620,12 +624,12 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
if (!pgpath) {
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
__multipath_queue_bio(m, bio);
pgpath = ERR_PTR(-EAGAIN);
}
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
} else if (mpath_double_check_test_bit(MPATHF_QUEUE_IO, m) ||
mpath_double_check_test_bit(MPATHF_PG_INIT_REQUIRED, m)) {
@@ -688,7 +692,6 @@ static void process_queued_io_list(struct multipath *m)
static void process_queued_bios(struct work_struct *work)
{
int r;
- unsigned long flags;
struct bio *bio;
struct bio_list bios;
struct blk_plug plug;
@@ -697,16 +700,16 @@ static void process_queued_bios(struct work_struct *work)
bio_list_init(&bios);
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
if (bio_list_empty(&m->queued_bios)) {
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
return;
}
bio_list_merge_init(&bios, &m->queued_bios);
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
blk_start_plug(&plug);
while ((bio = bio_list_pop(&bios))) {
@@ -1190,7 +1193,6 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc, char **argv)
struct dm_arg_set as;
unsigned int pg_count = 0;
unsigned int next_pg_num;
- unsigned long flags;
as.argc = argc;
as.argv = argv;
@@ -1255,9 +1257,9 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
enable_nopath_timeout(m);
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
@@ -1292,23 +1294,21 @@ static void multipath_wait_for_pg_init_completion(struct multipath *m)
static void flush_multipath_work(struct multipath *m)
{
if (m->hw_handler_name) {
- unsigned long flags;
-
if (!atomic_read(&m->pg_init_in_progress))
goto skip;
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
if (atomic_read(&m->pg_init_in_progress) &&
!test_and_set_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) {
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
flush_workqueue(kmpath_handlerd);
multipath_wait_for_pg_init_completion(m);
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
}
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
}
skip:
if (m->queue_mode == DM_TYPE_BIO_BASED)
@@ -1370,11 +1370,10 @@ out:
static int reinstate_path(struct pgpath *pgpath)
{
int r = 0, run_queue = 0;
- unsigned long flags;
struct multipath *m = pgpath->pg->m;
unsigned int nr_valid_paths;
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
if (pgpath->is_active)
goto out;
@@ -1404,7 +1403,7 @@ static int reinstate_path(struct pgpath *pgpath)
schedule_work(&m->trigger_event);
out:
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
if (run_queue) {
dm_table_run_md_queue_async(m->ti->table);
process_queued_io_list(m);
@@ -1439,15 +1438,19 @@ static int action_dev(struct multipath *m, dev_t dev, action_fn action)
* Temporarily try to avoid having to use the specified PG
*/
static void bypass_pg(struct multipath *m, struct priority_group *pg,
- bool bypassed)
+ bool bypassed, bool can_be_delayed)
{
unsigned long flags;
spin_lock_irqsave(&m->lock, flags);
pg->bypassed = bypassed;
- m->current_pgpath = NULL;
- m->current_pg = NULL;
+ if (can_be_delayed && test_bit(MPATHF_DELAY_PG_SWITCH, &m->flags))
+ set_bit(MPATHF_NEED_PG_SWITCH, &m->flags);
+ else {
+ m->current_pgpath = NULL;
+ m->current_pg = NULL;
+ }
spin_unlock_irqrestore(&m->lock, flags);
@@ -1461,7 +1464,6 @@ static int switch_pg_num(struct multipath *m, const char *pgstr)
{
struct priority_group *pg;
unsigned int pgnum;
- unsigned long flags;
char dummy;
if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
@@ -1470,17 +1472,21 @@ static int switch_pg_num(struct multipath *m, const char *pgstr)
return -EINVAL;
}
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
list_for_each_entry(pg, &m->priority_groups, list) {
pg->bypassed = false;
if (--pgnum)
continue;
- m->current_pgpath = NULL;
- m->current_pg = NULL;
+ if (test_bit(MPATHF_DELAY_PG_SWITCH, &m->flags))
+ set_bit(MPATHF_NEED_PG_SWITCH, &m->flags);
+ else {
+ m->current_pgpath = NULL;
+ m->current_pg = NULL;
+ }
m->next_pg = pg;
}
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
schedule_work(&m->trigger_event);
return 0;
@@ -1507,7 +1513,7 @@ static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
break;
}
- bypass_pg(m, pg, bypassed);
+ bypass_pg(m, pg, bypassed, true);
return 0;
}
@@ -1561,7 +1567,7 @@ static void pg_init_done(void *data, int errors)
* Probably doing something like FW upgrade on the
* controller so try the other pg.
*/
- bypass_pg(m, pg, true);
+ bypass_pg(m, pg, true, false);
break;
case SCSI_DH_RETRY:
/* Wait before retrying. */
@@ -1742,6 +1748,9 @@ static void multipath_presuspend(struct dm_target *ti)
{
struct multipath *m = ti->private;
+ spin_lock_irq(&m->lock);
+ m->is_suspending = true;
+ spin_unlock_irq(&m->lock);
/* FIXME: bio-based shouldn't need to always disable queue_if_no_path */
if (m->queue_mode == DM_TYPE_BIO_BASED || !dm_noflush_suspending(m->ti))
queue_if_no_path(m, false, true, __func__);
@@ -1762,9 +1771,9 @@ static void multipath_postsuspend(struct dm_target *ti)
static void multipath_resume(struct dm_target *ti)
{
struct multipath *m = ti->private;
- unsigned long flags;
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
+ m->is_suspending = false;
if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) {
set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
@@ -1775,7 +1784,7 @@ static void multipath_resume(struct dm_target *ti)
test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags));
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
}
/*
@@ -1798,14 +1807,13 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
unsigned int status_flags, char *result, unsigned int maxlen)
{
int sz = 0, pg_counter, pgpath_counter;
- unsigned long flags;
struct multipath *m = ti->private;
struct priority_group *pg;
struct pgpath *p;
unsigned int pg_num;
char state;
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
/* Features */
if (type == STATUSTYPE_INFO)
@@ -1845,10 +1853,10 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
DMEMIT("%u ", m->nr_priority_groups);
- if (m->next_pg)
- pg_num = m->next_pg->pg_num;
- else if (m->current_pg)
+ if (m->current_pg)
pg_num = m->current_pg->pg_num;
+ else if (m->next_pg)
+ pg_num = m->next_pg->pg_num;
else
pg_num = (m->nr_priority_groups ? 1 : 0);
@@ -1951,7 +1959,7 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
break;
}
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
}
static int multipath_message(struct dm_target *ti, unsigned int argc, char **argv,
@@ -1961,7 +1969,6 @@ static int multipath_message(struct dm_target *ti, unsigned int argc, char **arg
dev_t dev;
struct multipath *m = ti->private;
action_fn action;
- unsigned long flags;
mutex_lock(&m->work_mutex);
@@ -1973,9 +1980,9 @@ static int multipath_message(struct dm_target *ti, unsigned int argc, char **arg
if (argc == 1) {
if (!strcasecmp(argv[0], "queue_if_no_path")) {
r = queue_if_no_path(m, true, false, __func__);
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
enable_nopath_timeout(m);
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
goto out;
} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
r = queue_if_no_path(m, false, false, __func__);
@@ -2021,14 +2028,132 @@ out:
return r;
}
+/*
+ * Perform a minimal read from the given path to find out whether the
+ * path still works. If a path error occurs, fail it.
+ */
+static int probe_path(struct pgpath *pgpath)
+{
+ struct block_device *bdev = pgpath->path.dev->bdev;
+ unsigned int read_size = bdev_logical_block_size(bdev);
+ struct page *page;
+ struct bio *bio;
+ blk_status_t status;
+ int r = 0;
+
+ if (WARN_ON_ONCE(read_size > PAGE_SIZE))
+ return -EINVAL;
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+	/* Perform a minimal read: sector 0, length read_size */
+ bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
+ if (!bio) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ bio->bi_iter.bi_sector = 0;
+ __bio_add_page(bio, page, read_size, 0);
+ submit_bio_wait(bio);
+ status = bio->bi_status;
+ bio_put(bio);
+
+ if (status && blk_path_error(status))
+ fail_path(pgpath);
+
+out:
+ __free_page(page);
+ return r;
+}
+
+/*
+ * Probe all active paths in current_pg to find out whether they still work.
+ * Fail all paths that do not work.
+ *
+ * Return -ENOTCONN if no valid path is left (even outside of current_pg). We
+ * cannot probe paths in other pgs without switching current_pg, so if valid
+ * paths are only in different pgs, they may or may not work. Additionally,
+ * we should not probe paths in a pathgroup that is in the process of
+ * initializing: userspace can submit a request and we'll switch and wait
+ * for the pathgroup to be initialized. If the request fails, userspace
+ * may need to probe again.
+ */
+static int probe_active_paths(struct multipath *m)
+{
+ struct pgpath *pgpath;
+ struct priority_group *pg = NULL;
+ int r = 0;
+
+ spin_lock_irq(&m->lock);
+ if (test_bit(MPATHF_DELAY_PG_SWITCH, &m->flags)) {
+ wait_event_lock_irq(m->probe_wait,
+ !test_bit(MPATHF_DELAY_PG_SWITCH, &m->flags),
+ m->lock);
+ /*
+		 * If we waited because a probe was already in progress and
+		 * it probed the currently active pathgroup, don't reprobe;
+		 * just check whether any valid paths remain.
+ */
+ if (m->current_pg == m->last_probed_pg)
+ goto skip_probe;
+ }
+ if (!m->current_pg || m->is_suspending ||
+ test_bit(MPATHF_QUEUE_IO, &m->flags))
+ goto skip_probe;
+ set_bit(MPATHF_DELAY_PG_SWITCH, &m->flags);
+ pg = m->last_probed_pg = m->current_pg;
+ spin_unlock_irq(&m->lock);
+
+ list_for_each_entry(pgpath, &pg->pgpaths, list) {
+ if (pg != READ_ONCE(m->current_pg) ||
+ READ_ONCE(m->is_suspending))
+ goto out;
+ if (!pgpath->is_active)
+ continue;
+
+ r = probe_path(pgpath);
+ if (r < 0)
+ goto out;
+ }
+
+out:
+ spin_lock_irq(&m->lock);
+ clear_bit(MPATHF_DELAY_PG_SWITCH, &m->flags);
+ if (test_and_clear_bit(MPATHF_NEED_PG_SWITCH, &m->flags)) {
+ m->current_pgpath = NULL;
+ m->current_pg = NULL;
+ }
+skip_probe:
+ if (r == 0 && !atomic_read(&m->nr_valid_paths))
+ r = -ENOTCONN;
+ spin_unlock_irq(&m->lock);
+ if (pg)
+ wake_up(&m->probe_wait);
+ return r;
+}
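Annotation: probe_active_paths() is reached from userspace through the new DM_MPATH_PROBE_PATHS ioctl, dispatched in multipath_prepare_ioctl() below and whitelisted in dm-ioctl.c above. A hedged userspace sketch of the resulting contract — 0 means the active pathgroup still has at least one usable path, errno ENOTCONN means no valid path is left. The macro name and its availability via <linux/dm-ioctl.h> are assumptions taken from this patch:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dm-ioctl.h>	/* assumed to define DM_MPATH_PROBE_PATHS */

int main(int argc, char **argv)
{
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s /dev/dm-N\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Probes every active path in the current pathgroup and fails
	 * the ones that return path errors. */
	if (ioctl(fd, DM_MPATH_PROBE_PATHS) < 0) {
		perror("DM_MPATH_PROBE_PATHS");	/* ENOTCONN: no usable path */
		close(fd);
		return 1;
	}
	printf("active pathgroup still has at least one usable path\n");
	close(fd);
	return 0;
}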
+
static int multipath_prepare_ioctl(struct dm_target *ti,
- struct block_device **bdev)
+ struct block_device **bdev,
+ unsigned int cmd, unsigned long arg,
+ bool *forward)
{
struct multipath *m = ti->private;
struct pgpath *pgpath;
- unsigned long flags;
int r;
+ if (_IOC_TYPE(cmd) == DM_IOCTL) {
+ *forward = false;
+ switch (cmd) {
+ case DM_MPATH_PROBE_PATHS:
+ return probe_active_paths(m);
+ default:
+ return -ENOTTY;
+ }
+ }
+
pgpath = READ_ONCE(m->current_pgpath);
if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
pgpath = choose_pgpath(m, 0);
@@ -2044,10 +2169,10 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
} else {
/* No path is available */
r = -EIO;
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
r = -ENOTCONN;
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
}
if (r == -ENOTCONN) {
@@ -2055,10 +2180,10 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
/* Path status changed, redo selection */
(void) choose_pgpath(m, 0);
}
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
(void) __pg_init_all_paths(m);
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
dm_table_run_md_queue_async(m->ti->table);
process_queued_io_list(m);
}
@@ -2180,7 +2305,7 @@ static int multipath_busy(struct dm_target *ti)
*/
static struct target_type multipath_target = {
.name = "multipath",
- .version = {1, 14, 0},
+ .version = {1, 15, 0},
.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE |
DM_TARGET_PASSES_INTEGRITY,
.module = THIS_MODULE,
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 9e615b4f1f5e..785af4816584 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -133,10 +133,9 @@ static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
spin_lock_irqsave(&ms->lock, flags);
should_wake = !(bl->head);
bio_list_add(bl, bio);
- spin_unlock_irqrestore(&ms->lock, flags);
-
if (should_wake)
wakeup_mirrord(ms);
+ spin_unlock_irqrestore(&ms->lock, flags);
}
static void dispatch_bios(void *context, struct bio_list *bio_list)
@@ -646,9 +645,9 @@ static void write_callback(unsigned long error, void *context)
if (!ms->failures.head)
should_wake = 1;
bio_list_add(&ms->failures, bio);
- spin_unlock_irqrestore(&ms->lock, flags);
if (should_wake)
wakeup_mirrord(ms);
+ spin_unlock_irqrestore(&ms->lock, flags);
}
static void do_write(struct mirror_set *ms, struct bio *bio)
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index e23076f7ece2..a6ca92049c10 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -217,10 +217,10 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
if (unlikely(error == BLK_STS_TARGET)) {
if (req_op(clone) == REQ_OP_DISCARD &&
!clone->q->limits.max_discard_sectors)
- disable_discard(tio->md);
+ blk_queue_disable_discard(tio->md->queue);
else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
!clone->q->limits.max_write_zeroes_sectors)
- disable_write_zeroes(tio->md);
+ blk_queue_disable_write_zeroes(tio->md->queue);
}
switch (r) {
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index a1b7535c508a..a7dc04bd55e5 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -405,7 +405,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
blk_status_t *error)
{
unsigned int i;
- char major_minor[16];
+ char major_minor[22];
struct stripe_c *sc = ti->private;
if (!*error)
@@ -417,8 +417,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
if (*error == BLK_STS_NOTSUPP)
return DM_ENDIO_DONE;
- memset(major_minor, 0, sizeof(major_minor));
- sprintf(major_minor, "%d:%d", MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)));
+ format_dev_t(major_minor, bio_dev(bio));
/*
* Test to see which stripe drive triggered the event
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index dfd9fb52a6f3..bb1a70b5a215 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -517,7 +517,9 @@ static void switch_status(struct dm_target *ti, status_type_t type,
*
* Passthrough all ioctls to the path for sector 0
*/
-static int switch_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
+static int switch_prepare_ioctl(struct dm_target *ti, struct block_device **bdev,
+ unsigned int cmd, unsigned long arg,
+ bool *forward)
{
struct switch_ctx *sctx = ti->private;
unsigned int path_nr;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 6b23e777e10e..24a857ff6d0b 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -117,7 +117,6 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
n_targets = (struct dm_target *) (n_highs + num);
memset(n_highs, -1, sizeof(*n_highs) * num);
- kvfree(t->highs);
t->num_allocated = num;
t->highs = n_highs;
@@ -257,7 +256,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
if (bdev_is_zoned(bdev)) {
unsigned int zone_sectors = bdev_zone_sectors(bdev);
- if (start & (zone_sectors - 1)) {
+ if (!bdev_is_zone_aligned(bdev, start)) {
DMERR("%s: start=%llu not aligned to h/w zone size %u of %pg",
dm_device_name(ti->table->md),
(unsigned long long)start,
@@ -274,7 +273,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
* devices do not end up with a smaller zone in the middle of
* the sector range.
*/
- if (len & (zone_sectors - 1)) {
+ if (!bdev_is_zone_aligned(bdev, len)) {
DMERR("%s: len=%llu not aligned to h/w zone size %u of %pg",
dm_device_name(ti->table->md),
(unsigned long long)len,
@@ -431,6 +430,13 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
return 0;
}
+ mutex_lock(&q->limits_lock);
+ /*
+ * BLK_FEAT_ATOMIC_WRITES is not inherited from the bottom device in
+ * blk_stack_limits(), so do it manually.
+ */
+ limits->features |= (q->limits.features & BLK_FEAT_ATOMIC_WRITES);
+
if (blk_stack_limits(limits, &q->limits,
get_start_sect(bdev) + start) < 0)
DMWARN("%s: adding target device %pg caused an alignment inconsistency: "
@@ -448,6 +454,7 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
*/
if (!dm_target_has_integrity(ti->type))
queue_limits_stack_integrity_bdev(limits, bdev);
+ mutex_unlock(&q->limits_lock);
return 0;
}
@@ -1189,6 +1196,176 @@ put_live_table:
return 0;
}
+enum dm_wrappedkey_op {
+ DERIVE_SW_SECRET,
+ IMPORT_KEY,
+ GENERATE_KEY,
+ PREPARE_KEY,
+};
+
+struct dm_wrappedkey_op_args {
+ enum dm_wrappedkey_op op;
+ int err;
+ union {
+ struct {
+ const u8 *eph_key;
+ size_t eph_key_size;
+ u8 *sw_secret;
+ } derive_sw_secret;
+ struct {
+ const u8 *raw_key;
+ size_t raw_key_size;
+ u8 *lt_key;
+ } import_key;
+ struct {
+ u8 *lt_key;
+ } generate_key;
+ struct {
+ const u8 *lt_key;
+ size_t lt_key_size;
+ u8 *eph_key;
+ } prepare_key;
+ };
+};
+
+static int dm_wrappedkey_op_callback(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ struct dm_wrappedkey_op_args *args = data;
+ struct block_device *bdev = dev->bdev;
+ struct blk_crypto_profile *profile =
+ bdev_get_queue(bdev)->crypto_profile;
+ int err = -EOPNOTSUPP;
+
+ if (!args->err)
+ return 0;
+
+ switch (args->op) {
+ case DERIVE_SW_SECRET:
+ err = blk_crypto_derive_sw_secret(
+ bdev,
+ args->derive_sw_secret.eph_key,
+ args->derive_sw_secret.eph_key_size,
+ args->derive_sw_secret.sw_secret);
+ break;
+ case IMPORT_KEY:
+ err = blk_crypto_import_key(profile,
+ args->import_key.raw_key,
+ args->import_key.raw_key_size,
+ args->import_key.lt_key);
+ break;
+ case GENERATE_KEY:
+ err = blk_crypto_generate_key(profile,
+ args->generate_key.lt_key);
+ break;
+ case PREPARE_KEY:
+ err = blk_crypto_prepare_key(profile,
+ args->prepare_key.lt_key,
+ args->prepare_key.lt_key_size,
+ args->prepare_key.eph_key);
+ break;
+ }
+ args->err = err;
+
+ /* Try another device in case this fails. */
+ return 0;
+}
+
+static int dm_exec_wrappedkey_op(struct blk_crypto_profile *profile,
+ struct dm_wrappedkey_op_args *args)
+{
+ struct mapped_device *md =
+ container_of(profile, struct dm_crypto_profile, profile)->md;
+ struct dm_target *ti;
+ struct dm_table *t;
+ int srcu_idx;
+ int i;
+
+ args->err = -EOPNOTSUPP;
+
+ t = dm_get_live_table(md, &srcu_idx);
+ if (!t)
+ goto out;
+
+ /*
+ * blk-crypto currently has no support for multiple incompatible
+ * implementations of wrapped inline crypto keys on a single system.
+ * It was already checked earlier that support for wrapped keys was
+ * declared on all underlying devices. Thus, all the underlying devices
+ * should support all wrapped key operations and they should behave
+ * identically, i.e. work with the same keys. So, just executing the
+ * operation on the first device on which it works suffices for now.
+ */
+ for (i = 0; i < t->num_targets; i++) {
+ ti = dm_table_get_target(t, i);
+ if (!ti->type->iterate_devices)
+ continue;
+ ti->type->iterate_devices(ti, dm_wrappedkey_op_callback, args);
+ if (!args->err)
+ break;
+ }
+out:
+ dm_put_live_table(md, srcu_idx);
+ return args->err;
+}
+
+static int dm_derive_sw_secret(struct blk_crypto_profile *profile,
+ const u8 *eph_key, size_t eph_key_size,
+ u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE])
+{
+ struct dm_wrappedkey_op_args args = {
+ .op = DERIVE_SW_SECRET,
+ .derive_sw_secret = {
+ .eph_key = eph_key,
+ .eph_key_size = eph_key_size,
+ .sw_secret = sw_secret,
+ },
+ };
+ return dm_exec_wrappedkey_op(profile, &args);
+}
+
+static int dm_import_key(struct blk_crypto_profile *profile,
+ const u8 *raw_key, size_t raw_key_size,
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+ struct dm_wrappedkey_op_args args = {
+ .op = IMPORT_KEY,
+ .import_key = {
+ .raw_key = raw_key,
+ .raw_key_size = raw_key_size,
+ .lt_key = lt_key,
+ },
+ };
+ return dm_exec_wrappedkey_op(profile, &args);
+}
+
+static int dm_generate_key(struct blk_crypto_profile *profile,
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+ struct dm_wrappedkey_op_args args = {
+ .op = GENERATE_KEY,
+ .generate_key = {
+ .lt_key = lt_key,
+ },
+ };
+ return dm_exec_wrappedkey_op(profile, &args);
+}
+
+static int dm_prepare_key(struct blk_crypto_profile *profile,
+ const u8 *lt_key, size_t lt_key_size,
+ u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+ struct dm_wrappedkey_op_args args = {
+ .op = PREPARE_KEY,
+ .prepare_key = {
+ .lt_key = lt_key,
+ .lt_key_size = lt_key_size,
+ .eph_key = eph_key,
+ },
+ };
+ return dm_exec_wrappedkey_op(profile, &args);
+}
+
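Annotation: taken together, the four ll_ops above implement the wrapped-key lifecycle by proxying each operation to the first underlying device that accepts it. A reduced sketch of that lifecycle using only the blk-crypto calls visible in this patch; treating positive return values of import/prepare as key sizes is an assumption:

static int wrappedkey_lifecycle_sketch(struct blk_crypto_profile *profile,
				       struct block_device *bdev,
				       const u8 *raw_key, size_t raw_key_size)
{
	u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE];
	u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE];
	u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE];
	int lt_size, eph_size;

	/* Wrap a raw key into its long-term (storable) form. */
	lt_size = blk_crypto_import_key(profile, raw_key, raw_key_size, lt_key);
	if (lt_size < 0)
		return lt_size;

	/* Unwrap it into an ephemeral key usable for I/O in this boot. */
	eph_size = blk_crypto_prepare_key(profile, lt_key, lt_size, eph_key);
	if (eph_size < 0)
		return eph_size;

	/* Derive the software secret (e.g. for fscrypt) from the
	 * ephemeral key. */
	return blk_crypto_derive_sw_secret(bdev, eph_key, eph_size,
					   sw_secret);
}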
static int
device_intersect_crypto_capabilities(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
@@ -1263,6 +1440,13 @@ static int dm_table_construct_crypto_profile(struct dm_table *t)
profile);
}
+ if (profile->key_types_supported & BLK_CRYPTO_KEY_TYPE_HW_WRAPPED) {
+ profile->ll_ops.derive_sw_secret = dm_derive_sw_secret;
+ profile->ll_ops.import_key = dm_import_key;
+ profile->ll_ops.generate_key = dm_generate_key;
+ profile->ll_ops.prepare_key = dm_prepare_key;
+ }
+
if (t->md->queue &&
!blk_crypto_has_capabilities(profile,
t->md->queue->crypto_profile)) {
@@ -1490,6 +1674,18 @@ bool dm_table_has_no_data_devices(struct dm_table *t)
return true;
}
+bool dm_table_is_wildcard(struct dm_table *t)
+{
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
+
+ if (!dm_target_is_wildcard(ti->type))
+ return false;
+ }
+
+ return true;
+}
+
static int device_not_zoned(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
@@ -1721,8 +1917,12 @@ static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *
sector_t start, sector_t len, void *data)
{
struct request_queue *q = bdev_get_queue(dev->bdev);
+ int b;
- return !q->limits.max_write_zeroes_sectors;
+ mutex_lock(&q->limits_lock);
+ b = !q->limits.max_write_zeroes_sectors;
+ mutex_unlock(&q->limits_lock);
+ return b;
}
static bool dm_table_supports_write_zeroes(struct dm_table *t)
@@ -1830,10 +2030,24 @@ static bool dm_table_supports_atomic_writes(struct dm_table *t)
return true;
}
+bool dm_table_supports_size_change(struct dm_table *t, sector_t old_size,
+ sector_t new_size)
+{
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && dm_has_zone_plugs(t->md) &&
+ old_size != new_size) {
+ DMWARN("%s: device has zone write plug resources. "
+ "Cannot change size",
+ dm_device_name(t->md));
+ return false;
+ }
+ return true;
+}
+
int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{
int r;
+ struct queue_limits old_limits;
if (!dm_table_supports_nowait(t))
limits->features &= ~BLK_FEAT_NOWAIT;
@@ -1860,28 +2074,30 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
if (dm_table_supports_flush(t))
limits->features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA;
- if (dm_table_supports_dax(t, device_not_dax_capable)) {
+ if (dm_table_supports_dax(t, device_not_dax_capable))
limits->features |= BLK_FEAT_DAX;
- if (dm_table_supports_dax(t, device_not_dax_synchronous_capable))
- set_dax_synchronous(t->md->dax_dev);
- } else
+ else
limits->features &= ~BLK_FEAT_DAX;
- if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
- dax_write_cache(t->md->dax_dev, true);
-
/* For a zoned table, setup the zone related queue attributes. */
- if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
- (limits->features & BLK_FEAT_ZONED)) {
- r = dm_set_zones_restrictions(t, q, limits);
- if (r)
- return r;
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
+ if (limits->features & BLK_FEAT_ZONED) {
+ r = dm_set_zones_restrictions(t, q, limits);
+ if (r)
+ return r;
+ } else if (dm_has_zone_plugs(t->md)) {
+ DMWARN("%s: device has zone write plug resources. "
+ "Cannot switch to non-zoned table.",
+ dm_device_name(t->md));
+ return -EINVAL;
+ }
}
if (dm_table_supports_atomic_writes(t))
limits->features |= BLK_FEAT_ATOMIC_WRITES;
- r = queue_limits_set(q, limits);
+ old_limits = queue_limits_start_update(q);
+ r = queue_limits_commit_update(q, limits);
if (r)
return r;
@@ -1892,10 +2108,21 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
(limits->features & BLK_FEAT_ZONED)) {
r = dm_revalidate_zones(t, q);
- if (r)
+ if (r) {
+ queue_limits_set(q, &old_limits);
return r;
+ }
}
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
+ dm_finalize_zone_settings(t, limits);
+
+ if (dm_table_supports_dax(t, device_not_dax_synchronous_capable))
+ set_dax_synchronous(t->md->dax_dev);
+
+ if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
+ dax_write_cache(t->md->dax_dev, true);
+
dm_update_crypto_profile(q, t);
return 0;
}
diff --git a/drivers/md/dm-vdo/indexer/volume.c b/drivers/md/dm-vdo/indexer/volume.c
index 655453bb276b..425b3a74f4db 100644
--- a/drivers/md/dm-vdo/indexer/volume.c
+++ b/drivers/md/dm-vdo/indexer/volume.c
@@ -754,10 +754,11 @@ static int get_volume_page_protected(struct volume *volume, struct uds_request *
u32 physical_page, struct cached_page **page_ptr)
{
struct cached_page *page;
+ unsigned int zone_number = request->zone_number;
get_page_from_cache(&volume->page_cache, physical_page, &page);
if (page != NULL) {
- if (request->zone_number == 0) {
+ if (zone_number == 0) {
/* Only one zone is allowed to update the LRU. */
make_page_most_recent(&volume->page_cache, page);
}
@@ -767,7 +768,7 @@ static int get_volume_page_protected(struct volume *volume, struct uds_request *
}
/* Prepare to enqueue a read for the page. */
- end_pending_search(&volume->page_cache, request->zone_number);
+ end_pending_search(&volume->page_cache, zone_number);
mutex_lock(&volume->read_threads_mutex);
/*
@@ -787,8 +788,7 @@ static int get_volume_page_protected(struct volume *volume, struct uds_request *
* the order does not matter for correctness as it does below.
*/
mutex_unlock(&volume->read_threads_mutex);
- begin_pending_search(&volume->page_cache, physical_page,
- request->zone_number);
+ begin_pending_search(&volume->page_cache, physical_page, zone_number);
return UDS_QUEUED;
}
@@ -797,7 +797,7 @@ static int get_volume_page_protected(struct volume *volume, struct uds_request *
* "search pending" state in careful order so no other thread can mess with the data before
* the caller gets to look at it.
*/
- begin_pending_search(&volume->page_cache, physical_page, request->zone_number);
+ begin_pending_search(&volume->page_cache, physical_page, zone_number);
mutex_unlock(&volume->read_threads_mutex);
*page_ptr = page;
return UDS_SUCCESS;
@@ -849,6 +849,7 @@ static int search_cached_index_page(struct volume *volume, struct uds_request *r
{
int result;
struct cached_page *page = NULL;
+ unsigned int zone_number = request->zone_number;
u32 physical_page = map_to_physical_page(volume->geometry, chapter,
index_page_number);
@@ -858,18 +859,18 @@ static int search_cached_index_page(struct volume *volume, struct uds_request *r
* invalidation by the reader thread, before the reader thread has noticed that the
* invalidate_counter has been incremented.
*/
- begin_pending_search(&volume->page_cache, physical_page, request->zone_number);
+ begin_pending_search(&volume->page_cache, physical_page, zone_number);
result = get_volume_page_protected(volume, request, physical_page, &page);
if (result != UDS_SUCCESS) {
- end_pending_search(&volume->page_cache, request->zone_number);
+ end_pending_search(&volume->page_cache, zone_number);
return result;
}
result = uds_search_chapter_index_page(&page->index_page, volume->geometry,
&request->record_name,
record_page_number);
- end_pending_search(&volume->page_cache, request->zone_number);
+ end_pending_search(&volume->page_cache, zone_number);
return result;
}
@@ -882,6 +883,7 @@ int uds_search_cached_record_page(struct volume *volume, struct uds_request *req
{
struct cached_page *record_page;
struct index_geometry *geometry = volume->geometry;
+ unsigned int zone_number = request->zone_number;
int result;
u32 physical_page, page_number;
@@ -905,11 +907,11 @@ int uds_search_cached_record_page(struct volume *volume, struct uds_request *req
* invalidation by the reader thread, before the reader thread has noticed that the
* invalidate_counter has been incremented.
*/
- begin_pending_search(&volume->page_cache, physical_page, request->zone_number);
+ begin_pending_search(&volume->page_cache, physical_page, zone_number);
result = get_volume_page_protected(volume, request, physical_page, &record_page);
if (result != UDS_SUCCESS) {
- end_pending_search(&volume->page_cache, request->zone_number);
+ end_pending_search(&volume->page_cache, zone_number);
return result;
}
@@ -917,7 +919,7 @@ int uds_search_cached_record_page(struct volume *volume, struct uds_request *req
&request->record_name, geometry, &request->old_metadata))
*found = true;
- end_pending_search(&volume->page_cache, request->zone_number);
+ end_pending_search(&volume->page_cache, zone_number);
return UDS_SUCCESS;
}
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 0c41949db784..631a887b487c 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -593,6 +593,10 @@ int verity_fec_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
(*argc)--;
if (!strcasecmp(arg_name, DM_VERITY_OPT_FEC_DEV)) {
+ if (v->fec->dev) {
+ ti->error = "FEC device already specified";
+ return -EINVAL;
+ }
r = dm_get_device(ti, arg_value, BLK_OPEN_READ, &v->fec->dev);
if (r) {
ti->error = "FEC device lookup failed";
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 3c427f18a04b..81186bded1ce 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -682,7 +682,8 @@ static void verity_bh_work(struct work_struct *w)
static inline bool verity_use_bh(unsigned int bytes, unsigned short ioprio)
{
return ioprio <= IOPRIO_CLASS_IDLE &&
- bytes <= READ_ONCE(dm_verity_use_bh_bytes[ioprio]);
+ bytes <= READ_ONCE(dm_verity_use_bh_bytes[ioprio]) &&
+ !need_resched();
}
static void verity_end_io(struct bio *bio)
@@ -993,7 +994,9 @@ static void verity_status(struct dm_target *ti, status_type_t type,
}
}
-static int verity_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
+static int verity_prepare_ioctl(struct dm_target *ti, struct block_device **bdev,
+ unsigned int cmd, unsigned long arg,
+ bool *forward)
{
struct dm_verity *v = ti->private;
@@ -1120,6 +1123,9 @@ static int verity_alloc_most_once(struct dm_verity *v)
{
struct dm_target *ti = v->ti;
+ if (v->validated_blocks)
+ return 0;
+
/* the bitset can only handle INT_MAX blocks */
if (v->data_blocks > INT_MAX) {
ti->error = "device too large to use check_at_most_once";
@@ -1143,6 +1149,9 @@ static int verity_alloc_zero_digest(struct dm_verity *v)
struct dm_verity_io *io;
u8 *zero_data;
+ if (v->zero_digest)
+ return 0;
+
v->zero_digest = kmalloc(v->digest_size, GFP_KERNEL);
if (!v->zero_digest)
@@ -1577,7 +1586,7 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- /* Root hash signature is a optional parameter*/
+ /* Root hash signature is an optional parameter */
r = verity_verify_root_hash(root_hash_digest_to_validate,
strlen(root_hash_digest_to_validate),
verify_args.sig,
diff --git a/drivers/md/dm-verity-verify-sig.c b/drivers/md/dm-verity-verify-sig.c
index a9e2c6c0a33c..d5261a0e4232 100644
--- a/drivers/md/dm-verity-verify-sig.c
+++ b/drivers/md/dm-verity-verify-sig.c
@@ -71,9 +71,14 @@ int verity_verify_sig_parse_opt_args(struct dm_arg_set *as,
const char *arg_name)
{
struct dm_target *ti = v->ti;
- int ret = 0;
+ int ret;
const char *sig_key = NULL;
+ if (v->signature_key_desc) {
+ ti->error = DM_VERITY_VERIFY_ERR("root_hash_sig_key_desc already specified");
+ return -EINVAL;
+ }
+
if (!*argc) {
ti->error = DM_VERITY_VERIFY_ERR("Signature key not specified");
return -EINVAL;
@@ -83,14 +88,18 @@ int verity_verify_sig_parse_opt_args(struct dm_arg_set *as,
(*argc)--;
ret = verity_verify_get_sig_from_key(sig_key, sig_opts);
- if (ret < 0)
+ if (ret < 0) {
ti->error = DM_VERITY_VERIFY_ERR("Invalid key specified");
+ return ret;
+ }
v->signature_key_desc = kstrdup(sig_key, GFP_KERNEL);
- if (!v->signature_key_desc)
+ if (!v->signature_key_desc) {
+ ti->error = DM_VERITY_VERIFY_ERR("Could not allocate memory for signature key");
return -ENOMEM;
+ }
- return ret;
+ return 0;
}
/*
diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c
index 20edd3fabbab..3d31b82e0730 100644
--- a/drivers/md/dm-zone.c
+++ b/drivers/md/dm-zone.c
@@ -56,24 +56,31 @@ int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
{
struct mapped_device *md = disk->private_data;
struct dm_table *map;
- int srcu_idx, ret;
+ struct dm_table *zone_revalidate_map = md->zone_revalidate_map;
+ int srcu_idx, ret = -EIO;
+ bool put_table = false;
- if (!md->zone_revalidate_map) {
- /* Regular user context */
+ if (!zone_revalidate_map || md->revalidate_map_task != current) {
+ /*
+		 * Regular user context, or zone revalidation during __bind()
+		 * is in progress but this call is from a different process.
+ */
if (dm_suspended_md(md))
return -EAGAIN;
map = dm_get_live_table(md, &srcu_idx);
- if (!map)
- return -EIO;
+ put_table = true;
} else {
/* Zone revalidation during __bind() */
- map = md->zone_revalidate_map;
+ map = zone_revalidate_map;
}
- ret = dm_blk_do_report_zones(md, map, sector, nr_zones, cb, data);
+ if (map)
+ ret = dm_blk_do_report_zones(md, map, sector, nr_zones, cb,
+ data);
- if (!md->zone_revalidate_map)
+ if (put_table)
dm_put_live_table(md, srcu_idx);
return ret;
@@ -153,33 +160,36 @@ int dm_revalidate_zones(struct dm_table *t, struct request_queue *q)
{
struct mapped_device *md = t->md;
struct gendisk *disk = md->disk;
+ unsigned int nr_zones = disk->nr_zones;
int ret;
if (!get_capacity(disk))
return 0;
- /* Revalidate only if something changed. */
- if (!disk->nr_zones || disk->nr_zones != md->nr_zones) {
- DMINFO("%s using %s zone append",
- disk->disk_name,
- queue_emulates_zone_append(q) ? "emulated" : "native");
- md->nr_zones = 0;
- }
-
- if (md->nr_zones)
+ /*
+ * Do not revalidate if zone write plug resources have already
+ * been allocated.
+ */
+ if (dm_has_zone_plugs(md))
return 0;
+ DMINFO("%s using %s zone append", disk->disk_name,
+ queue_emulates_zone_append(q) ? "emulated" : "native");
+
/*
* Our table is not live yet. So the call to dm_get_live_table()
* in dm_blk_report_zones() will fail. Set a temporary pointer to
* our table for dm_blk_report_zones() to use directly.
*/
md->zone_revalidate_map = t;
+ md->revalidate_map_task = current;
ret = blk_revalidate_disk_zones(disk);
+ md->revalidate_map_task = NULL;
md->zone_revalidate_map = NULL;
if (ret) {
DMERR("Revalidate zones failed %d", ret);
+ disk->nr_zones = nr_zones;
return ret;
}
@@ -337,15 +347,15 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
/*
* Check if zone append is natively supported, and if not, set the
- * mapped device queue as needing zone append emulation.
+ * mapped device queue as needing zone append emulation. If zone
+ * append is natively supported, make sure that
+ * max_hw_zone_append_sectors is not set to 0.
*/
WARN_ON_ONCE(queue_is_mq(q));
- if (dm_table_supports_zone_append(t)) {
- clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
- } else {
- set_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
+ if (!dm_table_supports_zone_append(t))
lim->max_hw_zone_append_sectors = 0;
- }
+ else if (lim->max_hw_zone_append_sectors == 0)
+ lim->max_hw_zone_append_sectors = lim->max_zone_append_sectors;
/*
* Determine the max open and max active zone limits for the mapped
@@ -380,15 +390,28 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
lim->max_open_zones = 0;
lim->max_active_zones = 0;
lim->max_hw_zone_append_sectors = 0;
+ lim->max_zone_append_sectors = 0;
lim->zone_write_granularity = 0;
lim->chunk_sectors = 0;
lim->features &= ~BLK_FEAT_ZONED;
- clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
- md->nr_zones = 0;
- disk->nr_zones = 0;
return 0;
}
+ if (get_capacity(disk) && dm_has_zone_plugs(t->md)) {
+ if (q->limits.chunk_sectors != lim->chunk_sectors) {
+ DMWARN("%s: device has zone write plug resources. "
+ "Cannot change zone size",
+ disk->disk_name);
+ return -EINVAL;
+ }
+ if (lim->max_hw_zone_append_sectors != 0 &&
+ !dm_table_is_wildcard(t)) {
+ DMWARN("%s: device has zone write plug resources. "
+ "New table must emulate zone append",
+ disk->disk_name);
+ return -EINVAL;
+ }
+ }
/*
* Warn once (when the capacity is not yet set) if the mapped device is
* partially using zone resources of the target devices as that leads to
@@ -408,6 +431,23 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
return 0;
}
+void dm_finalize_zone_settings(struct dm_table *t, struct queue_limits *lim)
+{
+ struct mapped_device *md = t->md;
+
+ if (lim->features & BLK_FEAT_ZONED) {
+ if (dm_table_supports_zone_append(t))
+ clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
+ else
+ set_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
+ } else {
+ clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
+ md->nr_zones = 0;
+ md->disk->nr_zones = 0;
+ }
+}
+
/*
* IO completion callback called from clone_endio().
*/
@@ -423,9 +463,9 @@ void dm_zone_endio(struct dm_io *io, struct bio *clone)
*/
if (clone->bi_status == BLK_STS_OK &&
bio_op(clone) == REQ_OP_ZONE_APPEND) {
- sector_t mask = bdev_zone_sectors(disk->part0) - 1;
-
- orig_bio->bi_iter.bi_sector += clone->bi_iter.bi_sector & mask;
+ orig_bio->bi_iter.bi_sector +=
+ bdev_offset_from_zone_start(disk->part0,
+ clone->bi_iter.bi_sector);
}
return;
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 6141fc25d842..5da3db06da10 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -1015,7 +1015,8 @@ static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits)
/*
* Pass on ioctl to the backend device.
*/
-static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
+static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev,
+ unsigned int cmd, unsigned long arg, bool *forward)
{
struct dmz_target *dmz = ti->private;
struct dmz_dev *dev = &dmz->dev[0];
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 5ab7574c0c76..1726f0f828cc 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -411,7 +411,8 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
}
static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
- struct block_device **bdev)
+ struct block_device **bdev, unsigned int cmd,
+ unsigned long arg, bool *forward)
{
struct dm_target *ti;
struct dm_table *map;
@@ -434,8 +435,8 @@ retry:
if (dm_suspended_md(md))
return -EAGAIN;
- r = ti->type->prepare_ioctl(ti, bdev);
- if (r == -ENOTCONN && !fatal_signal_pending(current)) {
+ r = ti->type->prepare_ioctl(ti, bdev, cmd, arg, forward);
+ if (r == -ENOTCONN && *forward && !fatal_signal_pending(current)) {
dm_put_live_table(md, *srcu_idx);
fsleep(10000);
goto retry;
@@ -454,9 +455,10 @@ static int dm_blk_ioctl(struct block_device *bdev, blk_mode_t mode,
{
struct mapped_device *md = bdev->bd_disk->private_data;
int r, srcu_idx;
+ bool forward = true;
- r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
- if (r < 0)
+ r = dm_prepare_ioctl(md, &srcu_idx, &bdev, cmd, arg, &forward);
+ if (!forward || r < 0)
goto out;
if (r > 0) {
@@ -1082,22 +1084,6 @@ static inline struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
return &md->queue->limits;
}
-void disable_discard(struct mapped_device *md)
-{
- struct queue_limits *limits = dm_get_queue_limits(md);
-
- /* device doesn't really support DISCARD, disable it */
- limits->max_hw_discard_sectors = 0;
-}
-
-void disable_write_zeroes(struct mapped_device *md)
-{
- struct queue_limits *limits = dm_get_queue_limits(md);
-
- /* device doesn't really support WRITE ZEROES, disable it */
- limits->max_write_zeroes_sectors = 0;
-}
-
static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
{
return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
@@ -1115,10 +1101,10 @@ static void clone_endio(struct bio *bio)
if (unlikely(error == BLK_STS_TARGET)) {
if (bio_op(bio) == REQ_OP_DISCARD &&
!bdev_max_discard_sectors(bio->bi_bdev))
- disable_discard(md);
+ blk_queue_disable_discard(md->queue);
else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
!bdev_write_zeroes_sectors(bio->bi_bdev))
- disable_write_zeroes(md);
+ blk_queue_disable_write_zeroes(md->queue);
}
if (static_branch_unlikely(&zoned_enabled) &&
@@ -2421,21 +2407,35 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
struct queue_limits *limits)
{
struct dm_table *old_map;
- sector_t size;
+ sector_t size, old_size;
int ret;
lockdep_assert_held(&md->suspend_lock);
size = dm_table_get_size(t);
+ old_size = dm_get_size(md);
+
+ if (!dm_table_supports_size_change(t, old_size, size)) {
+ old_map = ERR_PTR(-EINVAL);
+ goto out;
+ }
+
+ set_capacity(md->disk, size);
+
+ ret = dm_table_set_restrictions(t, md->queue, limits);
+ if (ret) {
+ set_capacity(md->disk, old_size);
+ old_map = ERR_PTR(ret);
+ goto out;
+ }
+
/*
* Wipe any geometry if the size of the table changed.
*/
- if (size != dm_get_size(md))
+ if (size != old_size)
memset(&md->geometry, 0, sizeof(md->geometry));
- set_capacity(md->disk, size);
-
dm_table_event_callback(t, event_callback, md);
if (dm_table_request_based(t)) {
@@ -2453,10 +2453,10 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
* requests in the queue may refer to bio from the old bioset,
* so you must walk through the queue to unprep.
*/
- if (!md->mempools) {
+ if (!md->mempools)
md->mempools = t->mempools;
- t->mempools = NULL;
- }
+ else
+ dm_free_md_mempools(t->mempools);
} else {
/*
* The md may already have mempools that need changing.
@@ -2465,14 +2465,8 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
*/
dm_free_md_mempools(md->mempools);
md->mempools = t->mempools;
- t->mempools = NULL;
- }
-
- ret = dm_table_set_restrictions(t, md->queue, limits);
- if (ret) {
- old_map = ERR_PTR(ret);
- goto out;
}
+ t->mempools = NULL;
old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
rcu_assign_pointer(md->map, (void *)t);
@@ -3638,10 +3632,13 @@ static int dm_pr_clear(struct block_device *bdev, u64 key)
struct mapped_device *md = bdev->bd_disk->private_data;
const struct pr_ops *ops;
int r, srcu_idx;
+ bool forward = true;
- r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
+ /* Not a real ioctl, but targets must not interpret non-DM ioctls */
+ r = dm_prepare_ioctl(md, &srcu_idx, &bdev, 0, 0, &forward);
if (r < 0)
goto out;
+ WARN_ON_ONCE(!forward);
ops = bdev->bd_disk->fops->pr_ops;
if (ops && ops->pr_clear)
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index a0a8ff119815..245f52b59215 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -58,6 +58,7 @@ void dm_table_event_callback(struct dm_table *t,
void (*fn)(void *), void *context);
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
bool dm_table_has_no_data_devices(struct dm_table *table);
+bool dm_table_is_wildcard(struct dm_table *t);
int dm_calculate_queue_limits(struct dm_table *table,
struct queue_limits *limits);
int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
@@ -72,6 +73,8 @@ struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
struct dm_target *dm_table_get_wildcard_target(struct dm_table *t);
bool dm_table_request_based(struct dm_table *t);
+bool dm_table_supports_size_change(struct dm_table *t, sector_t old_size,
+ sector_t new_size);
void dm_lock_md_type(struct mapped_device *md);
void dm_unlock_md_type(struct mapped_device *md);
@@ -102,6 +105,7 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *lim);
int dm_revalidate_zones(struct dm_table *t, struct request_queue *q);
+void dm_finalize_zone_settings(struct dm_table *t, struct queue_limits *lim);
void dm_zone_endio(struct dm_io *io, struct bio *clone);
#ifdef CONFIG_BLK_DEV_ZONED
int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
@@ -110,12 +114,14 @@ bool dm_is_zone_write(struct mapped_device *md, struct bio *bio);
int dm_zone_get_reset_bitmap(struct mapped_device *md, struct dm_table *t,
sector_t sector, unsigned int nr_zones,
unsigned long *need_reset);
+#define dm_has_zone_plugs(md) ((md)->disk->zone_wplugs_hash != NULL)
#else
#define dm_blk_report_zones NULL
static inline bool dm_is_zone_write(struct mapped_device *md, struct bio *bio)
{
return false;
}
+#define dm_has_zone_plugs(md) false
#endif
/*
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index c82d8d8a16ea..79df0d22e218 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -32,7 +32,7 @@ config ARM_PL172_MPMC
config ATMEL_EBI
bool "Atmel EBI driver"
- default y if ARCH_AT91
+ default ARCH_AT91
depends on ARCH_AT91 || COMPILE_TEST
depends on OF
select MFD_SYSCON
@@ -147,7 +147,7 @@ config FPGA_DFL_EMIF
config MVEBU_DEVBUS
bool "Marvell EBU Device Bus Controller"
- default y if PLAT_ORION
+ default PLAT_ORION
depends on PLAT_ORION || COMPILE_TEST
depends on OF
help
@@ -198,7 +198,7 @@ config DA8XX_DDRCTL
config PL353_SMC
tristate "ARM PL35X Static Memory Controller(SMC) driver"
- default y if ARM
+ default ARM
depends on ARM || COMPILE_TEST
depends on ARM_AMBA
help
@@ -225,6 +225,23 @@ config STM32_FMC2_EBI
devices (like SRAM, ethernet adapters, FPGAs, LCD displays, ...) on
SOCs containing the FMC2 External Bus Interface.
+config STM32_OMM
+ tristate "STM32 Octo Memory Manager"
+ depends on SPI_STM32_OSPI || COMPILE_TEST
+ help
+ This driver manages the muxing between the 2 OSPI busses and
+ the 2 output ports. There are 4 possible muxing configurations:
+ - direct mode (no multiplexing): OSPI1 output is on port 1 and OSPI2
+ output is on port 2
+ - OSPI1 and OSPI2 are multiplexed over the same output port 1
+	  - swapped mode (no multiplexing): OSPI1 output is on port 2,
+	    OSPI2 output is on port 1
+	  - OSPI1 and OSPI2 are multiplexed over the same output port 2
+	  It also manages:
+ - the split of the memory area shared between the 2 OSPI instances.
+ - chip select selection override.
+ - the time between 2 transactions in multiplexed mode.
+
source "drivers/memory/samsung/Kconfig"
source "drivers/memory/tegra/Kconfig"
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index d2e6ca9abbe0..c1959661bf63 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_DA8XX_DDRCTL) += da8xx-ddrctl.o
obj-$(CONFIG_PL353_SMC) += pl353-smc.o
obj-$(CONFIG_RENESAS_RPCIF) += renesas-rpc-if.o
obj-$(CONFIG_STM32_FMC2_EBI) += stm32-fmc2-ebi.o
+obj-$(CONFIG_STM32_OMM) += stm32_omm.o
obj-$(CONFIG_SAMSUNG_MC) += samsung/
obj-$(CONFIG_TEGRA_MC) += tegra/
diff --git a/drivers/memory/bt1-l2-ctl.c b/drivers/memory/bt1-l2-ctl.c
index 78bd71b203f2..0fd96abc172a 100644
--- a/drivers/memory/bt1-l2-ctl.c
+++ b/drivers/memory/bt1-l2-ctl.c
@@ -222,7 +222,7 @@ static ssize_t l2_ctl_latency_show(struct device *dev,
if (ret)
return ret;
- return scnprintf(buf, PAGE_SIZE, "%u\n", data);
+ return sysfs_emit(buf, "%u\n", data);
}
static ssize_t l2_ctl_latency_store(struct device *dev,
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index a8f5467d6b31..c086c22511f7 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -283,6 +283,43 @@ static int mtk_smi_larb_config_port_gen2_general(struct device *dev)
return 0;
}
+static const u8 mtk_smi_larb_mt6893_ostd[][SMI_LARB_PORT_NR_MAX] = {
+ [0] = {0x2, 0x6, 0x2, 0x2, 0x2, 0x28, 0x18, 0x18, 0x1, 0x1, 0x1, 0x8,
+ 0x8, 0x1, 0x3f},
+ [1] = {0x2, 0x6, 0x2, 0x2, 0x2, 0x28, 0x18, 0x18, 0x1, 0x1, 0x1, 0x8,
+ 0x8, 0x1, 0x3f},
+ [2] = {0x5, 0x5, 0x5, 0x5, 0x1, 0x3f},
+ [3] = {0x5, 0x5, 0x5, 0x5, 0x1, 0x3f},
+ [4] = {0x28, 0x19, 0xb, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x4, 0x1},
+ [5] = {0x1, 0x1, 0x4, 0x1, 0x1, 0x1, 0x1, 0x16},
+ [6] = {},
+ [7] = {0x1, 0x4, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x4, 0x4, 0x1,
+ 0x4, 0x1, 0xa, 0x6, 0x1, 0xa, 0x6, 0x1, 0x1, 0x1, 0x1, 0x5,
+ 0x3, 0x3, 0x4},
+ [8] = {0x1, 0x4, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x4, 0x4, 0x1,
+ 0x4, 0x1, 0xa, 0x6, 0x1, 0xa, 0x6, 0x1, 0x1, 0x1, 0x1, 0x5,
+ 0x3, 0x3, 0x4},
+ [9] = {0x9, 0x7, 0xf, 0x8, 0x1, 0x8, 0x9, 0x3, 0x3, 0x6, 0x7, 0x4,
+ 0x9, 0x3, 0x4, 0xe, 0x1, 0x7, 0x8, 0x7, 0x7, 0x1, 0x6, 0x2,
+ 0xf, 0x8, 0x1, 0x1, 0x1},
+ [10] = {},
+ [11] = {0x9, 0x7, 0xf, 0x8, 0x1, 0x8, 0x9, 0x3, 0x3, 0x6, 0x7, 0x4,
+ 0x9, 0x3, 0x4, 0xe, 0x1, 0x7, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1, 0x1},
+ [12] = {},
+ [13] = {0x2, 0xc, 0xc, 0xe, 0x6, 0x6, 0x6, 0x6, 0x6, 0x12, 0x6, 0x1},
+ [14] = {0x2, 0xc, 0xc, 0x28, 0x12, 0x6},
+ [15] = {0x28, 0x1, 0x2, 0x28, 0x1},
+ [16] = {0x28, 0x14, 0x2, 0xc, 0x18, 0x2, 0x14, 0x14, 0x4, 0x4, 0x4, 0x2,
+ 0x4, 0x2, 0x8, 0x4, 0x4},
+ [17] = {0x28, 0x14, 0x2, 0xc, 0x18, 0x2, 0x14, 0x14, 0x4, 0x4, 0x4, 0x2,
+ 0x4, 0x2, 0x8, 0x4, 0x4},
+ [18] = {0x28, 0x14, 0x2, 0xc, 0x18, 0x2, 0x14, 0x14, 0x4, 0x4, 0x4, 0x2,
+ 0x4, 0x2, 0x8, 0x4, 0x4},
+ [19] = {0x2, 0x2, 0x4, 0x2},
+ [20] = {0x9, 0x9, 0x5, 0x5, 0x1, 0x1},
+};
+
static const u8 mtk_smi_larb_mt8188_ostd[][SMI_LARB_PORT_NR_MAX] = {
[0] = {0x02, 0x18, 0x22, 0x22, 0x01, 0x02, 0x0a,},
[1] = {0x12, 0x02, 0x14, 0x14, 0x01, 0x18, 0x0a,},
@@ -429,6 +466,12 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt6779 = {
/* DUMMY | IPU0 | IPU1 | CCU | MDLA */
};
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt6893 = {
+ .config_port = mtk_smi_larb_config_port_gen2_general,
+ .flags_general = MTK_SMI_FLAG_THRT_UPDATE | MTK_SMI_FLAG_SW_FLAG,
+ .ostd = mtk_smi_larb_mt6893_ostd,
+};
+
static const struct mtk_smi_larb_gen mtk_smi_larb_mt8167 = {
/* mt8167 do not need the port in larb */
.config_port = mtk_smi_larb_config_port_mt8167,
@@ -474,6 +517,7 @@ static const struct of_device_id mtk_smi_larb_of_ids[] = {
{.compatible = "mediatek,mt2712-smi-larb", .data = &mtk_smi_larb_mt2712},
{.compatible = "mediatek,mt6779-smi-larb", .data = &mtk_smi_larb_mt6779},
{.compatible = "mediatek,mt6795-smi-larb", .data = &mtk_smi_larb_mt8173},
+ {.compatible = "mediatek,mt6893-smi-larb", .data = &mtk_smi_larb_mt6893},
{.compatible = "mediatek,mt8167-smi-larb", .data = &mtk_smi_larb_mt8167},
{.compatible = "mediatek,mt8173-smi-larb", .data = &mtk_smi_larb_mt8173},
{.compatible = "mediatek,mt8183-smi-larb", .data = &mtk_smi_larb_mt8183},
@@ -694,6 +738,13 @@ static const struct mtk_smi_common_plat mtk_smi_common_mt6795 = {
.init = mtk_smi_common_mt6795_init,
};
+static const struct mtk_smi_common_plat mtk_smi_common_mt6893 = {
+ .type = MTK_SMI_GEN2,
+ .has_gals = true,
+ .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(4) |
+ F_MMU1_LARB(5) | F_MMU1_LARB(7),
+};
+
static const struct mtk_smi_common_plat mtk_smi_common_mt8183 = {
.type = MTK_SMI_GEN2,
.has_gals = true,
@@ -756,6 +807,7 @@ static const struct of_device_id mtk_smi_common_of_ids[] = {
{.compatible = "mediatek,mt2712-smi-common", .data = &mtk_smi_common_gen2},
{.compatible = "mediatek,mt6779-smi-common", .data = &mtk_smi_common_mt6779},
{.compatible = "mediatek,mt6795-smi-common", .data = &mtk_smi_common_mt6795},
+ {.compatible = "mediatek,mt6893-smi-common", .data = &mtk_smi_common_mt6893},
{.compatible = "mediatek,mt8167-smi-common", .data = &mtk_smi_common_gen2},
{.compatible = "mediatek,mt8173-smi-common", .data = &mtk_smi_common_gen2},
{.compatible = "mediatek,mt8183-smi-common", .data = &mtk_smi_common_mt8183},
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index d5bf3243fe78..9c96eed00194 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -2374,7 +2374,7 @@ static void gpmc_probe_dt_children(struct platform_device *pdev)
static int gpmc_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
- return 1; /* we're input only */
+ return GPIO_LINE_DIRECTION_IN; /* we're input only */
}
static int gpmc_gpio_direction_input(struct gpio_chip *chip,
@@ -2383,17 +2383,6 @@ static int gpmc_gpio_direction_input(struct gpio_chip *chip,
return 0; /* we're input only */
}
-static int gpmc_gpio_direction_output(struct gpio_chip *chip,
- unsigned int offset, int value)
-{
- return -EINVAL; /* we're input only */
-}
-
-static void gpmc_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
-{
-}
-
static int gpmc_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
u32 reg;
@@ -2415,8 +2404,6 @@ static int gpmc_gpio_init(struct gpmc_device *gpmc)
gpmc->gpio_chip.ngpio = gpmc_nr_waitpins;
gpmc->gpio_chip.get_direction = gpmc_gpio_get_direction;
gpmc->gpio_chip.direction_input = gpmc_gpio_direction_input;
- gpmc->gpio_chip.direction_output = gpmc_gpio_direction_output;
- gpmc->gpio_chip.set = gpmc_gpio_set;
gpmc->gpio_chip.get = gpmc_gpio_get;
gpmc->gpio_chip.base = -1;
diff --git a/drivers/memory/stm32_omm.c b/drivers/memory/stm32_omm.c
new file mode 100644
index 000000000000..79ceb1635698
--- /dev/null
+++ b/drivers/memory/stm32_omm.c
@@ -0,0 +1,479 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) STMicroelectronics 2025 - All Rights Reserved
+ * Author(s): Patrice Chotard <patrice.chotard@foss.st.com> for STMicroelectronics.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bus/stm32_firewall_device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#define OMM_CR 0
+#define CR_MUXEN BIT(0)
+#define CR_MUXENMODE_MASK GENMASK(1, 0)
+#define CR_CSSEL_OVR_EN BIT(4)
+#define CR_CSSEL_OVR_MASK GENMASK(6, 5)
+#define CR_REQ2ACK_MASK GENMASK(23, 16)
+
+#define OMM_CHILD_NB 2
+#define OMM_CLK_NB 3
+
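+/*
+ * clk_bulk[] layout follows clocks_name[] in stm32_omm_configure():
+ * entry 0 is the OMM bus clock, entries 1 and 2 the OSPI1/OSPI2 child
+ * clocks toggled by stm32_omm_toggle_child_clock().
+ */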
+struct stm32_omm {
+ struct resource *mm_res;
+ struct clk_bulk_data clk_bulk[OMM_CLK_NB];
+ struct reset_control *child_reset[OMM_CHILD_NB];
+ void __iomem *io_base;
+ u32 cr;
+ u8 nb_child;
+ bool restore_omm;
+};
+
+static int stm32_omm_set_amcr(struct device *dev, bool set)
+{
+ struct stm32_omm *omm = dev_get_drvdata(dev);
+ resource_size_t mm_ospi2_size = 0;
+ static const char * const mm_name[] = { "ospi1", "ospi2" };
+ struct regmap *syscfg_regmap;
+ struct device_node *node;
+ struct resource res, res1;
+ u32 amcr_base, amcr_mask;
+ int ret, idx;
+ unsigned int i, amcr, read_amcr;
+
+ for (i = 0; i < omm->nb_child; i++) {
+ idx = of_property_match_string(dev->of_node,
+ "memory-region-names",
+ mm_name[i]);
+ if (idx < 0)
+ continue;
+
+		/* save the previous iteration's region; res1 is only read when i == 1 */
+ res1.start = res.start;
+ res1.end = res.end;
+
+ node = of_parse_phandle(dev->of_node, "memory-region", idx);
+ if (!node)
+ continue;
+
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ of_node_put(node);
+ dev_err(dev, "unable to resolve memory region\n");
+ return ret;
+ }
+
+ /* check that memory region fits inside OMM memory map area */
+ if (!resource_contains(omm->mm_res, &res)) {
+ dev_err(dev, "%s doesn't fit inside OMM memory map area\n",
+ mm_name[i]);
+ dev_err(dev, "%pR doesn't fit inside %pR\n", &res, omm->mm_res);
+ of_node_put(node);
+
+ return -EFAULT;
+ }
+
+ if (i == 1) {
+ mm_ospi2_size = resource_size(&res);
+
+ /* check that OMM memory region 1 doesn't overlap memory region 2 */
+ if (resource_overlaps(&res, &res1)) {
+ dev_err(dev, "OMM memory-region %s overlaps memory region %s\n",
+ mm_name[0], mm_name[1]);
+ dev_err(dev, "%pR overlaps %pR\n", &res1, &res);
+ of_node_put(node);
+
+ return -EFAULT;
+ }
+ }
+ of_node_put(node);
+ }
+
+ syscfg_regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "st,syscfg-amcr");
+ if (IS_ERR(syscfg_regmap))
+ return dev_err_probe(dev, PTR_ERR(syscfg_regmap),
+ "Failed to get st,syscfg-amcr property\n");
+
+ ret = of_property_read_u32_index(dev->of_node, "st,syscfg-amcr", 1,
+ &amcr_base);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32_index(dev->of_node, "st,syscfg-amcr", 2,
+ &amcr_mask);
+ if (ret)
+ return ret;
+
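+	/*
+	 * AMCR appears to set, in 64MB units, the share of the memory-map
+	 * area assigned to OSPI2 (inferred from the SZ_64M granularity).
+	 */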
+ amcr = mm_ospi2_size / SZ_64M;
+
+ if (set)
+ regmap_update_bits(syscfg_regmap, amcr_base, amcr_mask, amcr);
+
+ /* read AMCR and check coherency with memory-map areas defined in DT */
+ regmap_read(syscfg_regmap, amcr_base, &read_amcr);
+ read_amcr = read_amcr >> (ffs(amcr_mask) - 1);
+
+ if (amcr != read_amcr) {
+ dev_err(dev, "AMCR value not coherent with DT memory-map areas\n");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
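+/*
+ * Enable or disable both OSPI child clocks (clk_bulk[1] and clk_bulk[2]).
+ * On an enable failure, clocks already enabled by this call are unwound.
+ */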
+static int stm32_omm_toggle_child_clock(struct device *dev, bool enable)
+{
+ struct stm32_omm *omm = dev_get_drvdata(dev);
+ int i, ret;
+
+ for (i = 0; i < omm->nb_child; i++) {
+ if (enable) {
+ ret = clk_prepare_enable(omm->clk_bulk[i + 1].clk);
+ if (ret) {
+				dev_err(dev, "Cannot enable clock\n");
+ goto clk_error;
+ }
+ } else {
+ clk_disable_unprepare(omm->clk_bulk[i + 1].clk);
+ }
+ }
+
+ return 0;
+
+clk_error:
+ while (i--)
+ clk_disable_unprepare(omm->clk_bulk[i + 1].clk);
+
+ return ret;
+}
+
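+/*
+ * Pulse the reset of each OSPI instance so any stale enable state is
+ * cleared before the mux is reconfigured; the child clocks are kept
+ * running across the reset pulse, presumably so the reset propagates.
+ */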
+static int stm32_omm_disable_child(struct device *dev)
+{
+ struct stm32_omm *omm = dev_get_drvdata(dev);
+ struct reset_control *reset;
+ int ret;
+ u8 i;
+
+ ret = stm32_omm_toggle_child_clock(dev, true);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < omm->nb_child; i++) {
+ /* reset OSPI to ensure CR_EN bit is set to 0 */
+ reset = omm->child_reset[i];
+ ret = reset_control_acquire(reset);
+ if (ret) {
+ stm32_omm_toggle_child_clock(dev, false);
+			dev_err(dev, "Cannot acquire reset: %d\n", ret);
+ return ret;
+ }
+
+ reset_control_assert(reset);
+ udelay(2);
+ reset_control_deassert(reset);
+
+ reset_control_release(reset);
+ }
+
+ return stm32_omm_toggle_child_clock(dev, false);
+}
+
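+/*
+ * Program OMM_CR (mux mode, req2ack delay, chip-select override) from the
+ * optional DT properties and apply the AMCR split; only called when
+ * firewall access to the OMM and to both children has been granted.
+ */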
+static int stm32_omm_configure(struct device *dev)
+{
+ static const char * const clocks_name[] = {"omm", "ospi1", "ospi2"};
+ struct stm32_omm *omm = dev_get_drvdata(dev);
+ unsigned long clk_rate_max = 0;
+ u32 mux = 0;
+ u32 cssel_ovr = 0;
+ u32 req2ack = 0;
+ struct reset_control *rstc;
+ unsigned long clk_rate;
+ int ret;
+ u8 i;
+
+ for (i = 0; i < OMM_CLK_NB; i++)
+ omm->clk_bulk[i].id = clocks_name[i];
+
+ /* retrieve OMM, OSPI1 and OSPI2 clocks */
+ ret = devm_clk_bulk_get(dev, OMM_CLK_NB, omm->clk_bulk);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get OMM/OSPI's clocks\n");
+
+	/* Ensure both OSPI instances are disabled before configuring OMM */
+ ret = stm32_omm_disable_child(dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+	/* find the maximum rate among the children's clocks */
+ for (i = 1; i <= omm->nb_child; i++) {
+ clk_rate = clk_get_rate(omm->clk_bulk[i].clk);
+ if (!clk_rate) {
+ dev_err(dev, "Invalid clock rate\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (clk_rate > clk_rate_max)
+ clk_rate_max = clk_rate;
+ }
+
+ rstc = devm_reset_control_get_exclusive(dev, "omm");
+ if (IS_ERR(rstc)) {
+ ret = dev_err_probe(dev, PTR_ERR(rstc), "reset get failed\n");
+ goto error;
+ }
+
+ reset_control_assert(rstc);
+ udelay(2);
+ reset_control_deassert(rstc);
+
+ omm->cr = readl_relaxed(omm->io_base + OMM_CR);
+ /* optional */
+ ret = of_property_read_u32(dev->of_node, "st,omm-mux", &mux);
+ if (!ret) {
+ if (mux & CR_MUXEN) {
+ ret = of_property_read_u32(dev->of_node, "st,omm-req2ack-ns",
+ &req2ack);
+			if (!ret && req2ack) {
+				/* convert the delay in ns to cycles of the fastest child clock */
+				req2ack = DIV_ROUND_UP(req2ack, NSEC_PER_SEC / clk_rate_max) - 1;
+
+				if (req2ack > 255)
+					req2ack = 255;	/* 8-bit CR_REQ2ACK_MASK field */
+			}
+
+			omm->cr &= ~CR_REQ2ACK_MASK;
+			omm->cr |= FIELD_PREP(CR_REQ2ACK_MASK, req2ack);
+
+ /*
+			 * If the mux is enabled, both OSPI clocks have to
+			 * remain enabled at all times
+ */
+ ret = stm32_omm_toggle_child_clock(dev, true);
+ if (ret)
+ goto error;
+ }
+
+ omm->cr &= ~CR_MUXENMODE_MASK;
+ omm->cr |= FIELD_PREP(CR_MUXENMODE_MASK, mux);
+ }
+
+ /* optional */
+ ret = of_property_read_u32(dev->of_node, "st,omm-cssel-ovr", &cssel_ovr);
+ if (!ret) {
+ omm->cr &= ~CR_CSSEL_OVR_MASK;
+ omm->cr |= FIELD_PREP(CR_CSSEL_OVR_MASK, cssel_ovr);
+ omm->cr |= CR_CSSEL_OVR_EN;
+ }
+
+ omm->restore_omm = true;
+ writel_relaxed(omm->cr, omm->io_base + OMM_CR);
+
+ ret = stm32_omm_set_amcr(dev, true);
+
+error:
+ pm_runtime_put_sync_suspend(dev);
+
+ return ret;
+}
+
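+/* Ask the firewall controller whether access to @np is granted (0 on success). */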
+static int stm32_omm_check_access(struct device_node *np)
+{
+ struct stm32_firewall firewall;
+ int ret;
+
+ ret = stm32_firewall_get_firewall(np, &firewall, 1);
+ if (ret)
+ return ret;
+
+ return stm32_firewall_grant_access(&firewall);
+}
+
+static int stm32_omm_probe(struct platform_device *pdev)
+{
+ static const char * const resets_name[] = {"ospi1", "ospi2"};
+ struct device *dev = &pdev->dev;
+ u8 child_access_granted = 0;
+ struct stm32_omm *omm;
+ int i, ret;
+
+ omm = devm_kzalloc(dev, sizeof(*omm), GFP_KERNEL);
+ if (!omm)
+ return -ENOMEM;
+
+ omm->io_base = devm_platform_ioremap_resource_byname(pdev, "regs");
+ if (IS_ERR(omm->io_base))
+ return PTR_ERR(omm->io_base);
+
+ omm->mm_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "memory_map");
+ if (!omm->mm_res)
+ return -ENODEV;
+
+ /* check child's access */
+ for_each_child_of_node_scoped(dev->of_node, child) {
+ if (omm->nb_child >= OMM_CHILD_NB) {
+			dev_err(dev, "Bad DT, found too many children\n");
+ return -E2BIG;
+ }
+
+ ret = stm32_omm_check_access(child);
+ if (ret < 0 && ret != -EACCES)
+ return ret;
+
+ if (!ret)
+ child_access_granted++;
+
+ omm->nb_child++;
+ }
+
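+	/* both OSPI child nodes must be described in DT, whether accessible or not */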
+ if (omm->nb_child != OMM_CHILD_NB)
+ return -EINVAL;
+
+ platform_set_drvdata(pdev, omm);
+
+ devm_pm_runtime_enable(dev);
+
+ /* check if OMM's resource access is granted */
+ ret = stm32_omm_check_access(dev->of_node);
+ if (ret < 0 && ret != -EACCES)
+ return ret;
+
+ for (i = 0; i < omm->nb_child; i++) {
+ omm->child_reset[i] = devm_reset_control_get_exclusive_released(dev,
+ resets_name[i]);
+
+ if (IS_ERR(omm->child_reset[i]))
+ return dev_err_probe(dev, PTR_ERR(omm->child_reset[i]),
+ "Can't get %s reset\n", resets_name[i]);
+ }
+
+ if (!ret && child_access_granted == OMM_CHILD_NB) {
+ ret = stm32_omm_configure(dev);
+ if (ret)
+ return ret;
+ } else {
+		dev_dbg(dev, "Octo Memory Manager resource access not granted\n");
+ /*
+ * AMCR can't be set, so check if current value is coherent
+ * with memory-map areas defined in DT
+ */
+ ret = stm32_omm_set_amcr(dev, false);
+ if (ret)
+ return ret;
+ }
+
+ ret = devm_of_platform_populate(dev);
+ if (ret) {
+ if (omm->cr & CR_MUXEN)
+ stm32_omm_toggle_child_clock(&pdev->dev, false);
+
+ return dev_err_probe(dev, ret, "Failed to create Octo Memory Manager child\n");
+ }
+
+ return 0;
+}
+
+static void stm32_omm_remove(struct platform_device *pdev)
+{
+ struct stm32_omm *omm = platform_get_drvdata(pdev);
+
+ if (omm->cr & CR_MUXEN)
+ stm32_omm_toggle_child_clock(&pdev->dev, false);
+}
+
+static const struct of_device_id stm32_omm_of_match[] = {
+ { .compatible = "st,stm32mp25-omm", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, stm32_omm_of_match);
+
+static int __maybe_unused stm32_omm_runtime_suspend(struct device *dev)
+{
+ struct stm32_omm *omm = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(omm->clk_bulk[0].clk);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_omm_runtime_resume(struct device *dev)
+{
+ struct stm32_omm *omm = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(omm->clk_bulk[0].clk);
+}
+
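+/*
+ * System sleep: OMM_CR and the AMCR split are re-applied on resume when
+ * they were configured at probe (restore_omm), since the register
+ * contents may be lost while the power domain is down.
+ */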
+static int __maybe_unused stm32_omm_suspend(struct device *dev)
+{
+ struct stm32_omm *omm = dev_get_drvdata(dev);
+
+ if (omm->restore_omm && omm->cr & CR_MUXEN)
+ stm32_omm_toggle_child_clock(dev, false);
+
+ return pinctrl_pm_select_sleep_state(dev);
+}
+
+static int __maybe_unused stm32_omm_resume(struct device *dev)
+{
+ struct stm32_omm *omm = dev_get_drvdata(dev);
+ int ret;
+
+ pinctrl_pm_select_default_state(dev);
+
+ if (!omm->restore_omm)
+ return 0;
+
+	/* Ensure both OSPI instances are disabled before configuring OMM */
+ ret = stm32_omm_disable_child(dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ writel_relaxed(omm->cr, omm->io_base + OMM_CR);
+ ret = stm32_omm_set_amcr(dev, true);
+ pm_runtime_put_sync_suspend(dev);
+ if (ret)
+ return ret;
+
+ if (omm->cr & CR_MUXEN)
+ ret = stm32_omm_toggle_child_clock(dev, true);
+
+ return ret;
+}
+
+static const struct dev_pm_ops stm32_omm_pm_ops = {
+ SET_RUNTIME_PM_OPS(stm32_omm_runtime_suspend,
+ stm32_omm_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(stm32_omm_suspend, stm32_omm_resume)
+};
+
+static struct platform_driver stm32_omm_driver = {
+ .probe = stm32_omm_probe,
+ .remove = stm32_omm_remove,
+ .driver = {
+ .name = "stm32-omm",
+ .of_match_table = stm32_omm_of_match,
+ .pm = &stm32_omm_pm_ops,
+ },
+};
+module_platform_driver(stm32_omm_driver);
+
+MODULE_DESCRIPTION("STMicroelectronics Octo Memory Manager driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/memory/tegra/Kconfig b/drivers/memory/tegra/Kconfig
index 3fe83d7c2bf8..fc5a27791826 100644
--- a/drivers/memory/tegra/Kconfig
+++ b/drivers/memory/tegra/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config TEGRA_MC
bool "NVIDIA Tegra Memory Controller support"
- default y
+ default ARCH_TEGRA
depends on ARCH_TEGRA || (COMPILE_TEST && COMMON_CLK)
select INTERCONNECT
help
@@ -12,7 +12,7 @@ if TEGRA_MC
config TEGRA20_EMC
tristate "NVIDIA Tegra20 External Memory Controller driver"
- default y
+ default ARCH_TEGRA_2x_SOC
depends on ARCH_TEGRA_2x_SOC || COMPILE_TEST
select DEVFREQ_GOV_SIMPLE_ONDEMAND
select PM_DEVFREQ
@@ -25,7 +25,7 @@ config TEGRA20_EMC
config TEGRA30_EMC
tristate "NVIDIA Tegra30 External Memory Controller driver"
- default y
+ default ARCH_TEGRA_3x_SOC
depends on ARCH_TEGRA_3x_SOC || COMPILE_TEST
select PM_OPP
select DDR
@@ -37,7 +37,7 @@ config TEGRA30_EMC
config TEGRA124_EMC
tristate "NVIDIA Tegra124 External Memory Controller driver"
- default y
+ default ARCH_TEGRA_124_SOC
depends on ARCH_TEGRA_124_SOC || COMPILE_TEST
select TEGRA124_CLK_EMC if ARCH_TEGRA
select PM_OPP
diff --git a/drivers/mfd/88pm886.c b/drivers/mfd/88pm886.c
index 891fdce5d8c1..39dd9a818b0f 100644
--- a/drivers/mfd/88pm886.c
+++ b/drivers/mfd/88pm886.c
@@ -16,11 +16,11 @@ static const struct regmap_config pm886_regmap_config = {
.max_register = PM886_REG_RTC_SPARE6,
};
-static struct regmap_irq pm886_regmap_irqs[] = {
+static const struct regmap_irq pm886_regmap_irqs[] = {
REGMAP_IRQ_REG(PM886_IRQ_ONKEY, 0, PM886_INT_ENA1_ONKEY),
};
-static struct regmap_irq_chip pm886_regmap_irq_chip = {
+static const struct regmap_irq_chip pm886_regmap_irq_chip = {
.name = "88pm886",
.irqs = pm886_regmap_irqs,
.num_irqs = ARRAY_SIZE(pm886_regmap_irqs),
@@ -30,11 +30,11 @@ static struct regmap_irq_chip pm886_regmap_irq_chip = {
.unmask_base = PM886_REG_INT_ENA_1,
};
-static struct resource pm886_onkey_resources[] = {
+static const struct resource pm886_onkey_resources[] = {
DEFINE_RES_IRQ_NAMED(PM886_IRQ_ONKEY, "88pm886-onkey"),
};
-static struct mfd_cell pm886_devs[] = {
+static const struct mfd_cell pm886_devs[] = {
MFD_CELL_RES("88pm886-onkey", pm886_onkey_resources),
MFD_CELL_NAME("88pm886-regulator"),
MFD_CELL_NAME("88pm886-rtc"),
@@ -124,7 +124,11 @@ static int pm886_probe(struct i2c_client *client)
if (err)
return dev_err_probe(dev, err, "Failed to register power off handler\n");
- device_init_wakeup(dev, device_property_read_bool(dev, "wakeup-source"));
+ if (device_property_read_bool(dev, "wakeup-source")) {
+ err = devm_device_init_wakeup(dev);
+ if (err)
+ return dev_err_probe(dev, err, "Failed to init wakeup\n");
+ }
return 0;
}
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 96992af22565..6fb3768e3d71 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -1312,21 +1312,42 @@ config MFD_RN5T618
functionality of the device.
config MFD_SEC_CORE
- tristate "Samsung Electronics PMIC Series Support"
+ tristate
+ select MFD_CORE
+ select REGMAP_IRQ
+
+config MFD_SEC_ACPM
+ tristate "Samsung Electronics S2MPG1x PMICs"
+ depends on EXYNOS_ACPM_PROTOCOL
+ depends on OF
+ select MFD_SEC_CORE
+ help
+ Support for the Samsung Electronics PMICs with ACPM interface.
+ This is a Power Management IC for mobile applications with buck
+ converters, various LDOs, power meters, RTC, clock outputs, and
+	  additional GPIO interfaces.
+ This driver provides common support for accessing the device;
+ additional drivers must be enabled in order to use the functionality
+ of the device.
+
+ To compile this driver as a module, choose M here: the module will be
+ called sec-acpm.
+
+config MFD_SEC_I2C
+ tristate "Samsung Electronics S2MPA/S2MPS1X/S2MPU/S5M series PMICs"
depends on I2C=y
depends on OF
- select MFD_CORE
+ select MFD_SEC_CORE
select REGMAP_I2C
- select REGMAP_IRQ
help
- Support for the Samsung Electronics PMIC devices coming
- usually along with Samsung Exynos SoC chipset.
+ Support for the Samsung Electronics PMIC devices with I2C interface
+	  usually coming along with a Samsung Exynos SoC chipset.
This driver provides common support for accessing the device,
additional drivers must be enabled in order to use the functionality
- of the device
+ of the device.
To compile this driver as a module, choose M here: the
- module will be called sec-core.
+ module will be called sec-i2c.
Have in mind that important core drivers (like regulators) depend
on this driver so building this as a module might require proper
initial ramdisk or might not boot up as well in certain scenarios.
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 5e5cc279af60..79495f9f3457 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -229,7 +229,10 @@ obj-$(CONFIG_MFD_RK8XX) += rk8xx-core.o
obj-$(CONFIG_MFD_RK8XX_I2C) += rk8xx-i2c.o
obj-$(CONFIG_MFD_RK8XX_SPI) += rk8xx-spi.o
obj-$(CONFIG_MFD_RN5T618) += rn5t618.o
-obj-$(CONFIG_MFD_SEC_CORE) += sec-core.o sec-irq.o
+sec-core-objs := sec-common.o sec-irq.o
+obj-$(CONFIG_MFD_SEC_CORE) += sec-core.o
+obj-$(CONFIG_MFD_SEC_ACPM) += sec-acpm.o
+obj-$(CONFIG_MFD_SEC_I2C) += sec-i2c.o
obj-$(CONFIG_MFD_SYSCON) += syscon.o
obj-$(CONFIG_MFD_LM3533) += lm3533-core.o lm3533-ctrlbank.o
obj-$(CONFIG_MFD_VEXPRESS_SYSREG) += vexpress-sysreg.o
diff --git a/drivers/mfd/aat2870-core.c b/drivers/mfd/aat2870-core.c
index 8ef510e84688..34d66ba9646a 100644
--- a/drivers/mfd/aat2870-core.c
+++ b/drivers/mfd/aat2870-core.c
@@ -320,9 +320,7 @@ static const struct file_operations aat2870_reg_fops = {
static void aat2870_init_debugfs(struct aat2870_data *aat2870)
{
- aat2870->dentry_root = debugfs_create_dir("aat2870", NULL);
-
- debugfs_create_file("regs", 0644, aat2870->dentry_root, aat2870,
+ debugfs_create_file("regs", 0644, aat2870->client->debugfs, aat2870,
&aat2870_reg_fops);
}
diff --git a/drivers/mfd/as3722.c b/drivers/mfd/as3722.c
index 6c0d89b0c7e3..7ab6fcc9c27c 100644
--- a/drivers/mfd/as3722.c
+++ b/drivers/mfd/as3722.c
@@ -394,7 +394,9 @@ static int as3722_i2c_probe(struct i2c_client *i2c)
return ret;
}
- device_init_wakeup(as3722->dev, true);
+ ret = devm_device_init_wakeup(as3722->dev);
+ if (ret)
+ return dev_err_probe(as3722->dev, ret, "Failed to init wakeup\n");
dev_dbg(as3722->dev, "AS3722 core driver initialized successfully\n");
return 0;
diff --git a/drivers/mfd/bcm590xx.c b/drivers/mfd/bcm590xx.c
index 8b56786d85d0..5a8456bbd63f 100644
--- a/drivers/mfd/bcm590xx.c
+++ b/drivers/mfd/bcm590xx.c
@@ -17,6 +17,15 @@
#include <linux/regmap.h>
#include <linux/slab.h>
+/* Under primary I2C address: */
+#define BCM590XX_REG_PMUID 0x1e
+
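+/* PMUREV packs the digital revision in bits [3:0] and the analog one in bits [7:4] */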
+#define BCM590XX_REG_PMUREV 0x1f
+#define BCM590XX_PMUREV_DIG_MASK 0xF
+#define BCM590XX_PMUREV_DIG_SHIFT 0
+#define BCM590XX_PMUREV_ANA_MASK 0xF0
+#define BCM590XX_PMUREV_ANA_SHIFT 4
+
static const struct mfd_cell bcm590xx_devs[] = {
{
.name = "bcm590xx-vregs",
@@ -37,6 +46,47 @@ static const struct regmap_config bcm590xx_regmap_config_sec = {
.cache_type = REGCACHE_MAPLE,
};
+/* Map PMU ID value to model name string */
+static const char * const bcm590xx_names[] = {
+ [BCM590XX_PMUID_BCM59054] = "BCM59054",
+ [BCM590XX_PMUID_BCM59056] = "BCM59056",
+};
+
+static int bcm590xx_parse_version(struct bcm590xx *bcm590xx)
+{
+ unsigned int id, rev;
+ int ret;
+
+ /* Get PMU ID and verify that it matches compatible */
+ ret = regmap_read(bcm590xx->regmap_pri, BCM590XX_REG_PMUID, &id);
+ if (ret) {
+ dev_err(bcm590xx->dev, "failed to read PMU ID: %d\n", ret);
+ return ret;
+ }
+
+ if (id != bcm590xx->pmu_id) {
+ dev_err(bcm590xx->dev, "Incorrect ID for %s: expected %x, got %x.\n",
+ bcm590xx_names[bcm590xx->pmu_id], bcm590xx->pmu_id, id);
+ return -ENODEV;
+ }
+
+ /* Get PMU revision and store it in the info struct */
+ ret = regmap_read(bcm590xx->regmap_pri, BCM590XX_REG_PMUREV, &rev);
+ if (ret) {
+ dev_err(bcm590xx->dev, "failed to read PMU revision: %d\n", ret);
+ return ret;
+ }
+
+ bcm590xx->rev_digital = (rev & BCM590XX_PMUREV_DIG_MASK) >> BCM590XX_PMUREV_DIG_SHIFT;
+
+ bcm590xx->rev_analog = (rev & BCM590XX_PMUREV_ANA_MASK) >> BCM590XX_PMUREV_ANA_SHIFT;
+
+	dev_dbg(bcm590xx->dev, "PMU ID 0x%x (%s), revision: digital %d, analog %d\n",
+ id, bcm590xx_names[id], bcm590xx->rev_digital, bcm590xx->rev_analog);
+
+ return 0;
+}
+
static int bcm590xx_i2c_probe(struct i2c_client *i2c_pri)
{
struct bcm590xx *bcm590xx;
@@ -50,6 +100,8 @@ static int bcm590xx_i2c_probe(struct i2c_client *i2c_pri)
bcm590xx->dev = &i2c_pri->dev;
bcm590xx->i2c_pri = i2c_pri;
+ bcm590xx->pmu_id = (uintptr_t) of_device_get_match_data(bcm590xx->dev);
+
bcm590xx->regmap_pri = devm_regmap_init_i2c(i2c_pri,
&bcm590xx_regmap_config_pri);
if (IS_ERR(bcm590xx->regmap_pri)) {
@@ -76,6 +128,10 @@ static int bcm590xx_i2c_probe(struct i2c_client *i2c_pri)
goto err;
}
+ ret = bcm590xx_parse_version(bcm590xx);
+ if (ret)
+ goto err;
+
ret = devm_mfd_add_devices(&i2c_pri->dev, -1, bcm590xx_devs,
ARRAY_SIZE(bcm590xx_devs), NULL, 0, NULL);
if (ret < 0) {
@@ -91,12 +147,20 @@ err:
}
static const struct of_device_id bcm590xx_of_match[] = {
- { .compatible = "brcm,bcm59056" },
+ {
+ .compatible = "brcm,bcm59054",
+ .data = (void *)BCM590XX_PMUID_BCM59054,
+ },
+ {
+ .compatible = "brcm,bcm59056",
+ .data = (void *)BCM590XX_PMUID_BCM59056,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, bcm590xx_of_match);
static const struct i2c_device_id bcm590xx_i2c_id[] = {
+ { "bcm59054" },
{ "bcm59056" },
{ }
};
diff --git a/drivers/mfd/exynos-lpass.c b/drivers/mfd/exynos-lpass.c
index 6a585173230b..44797001a432 100644
--- a/drivers/mfd/exynos-lpass.c
+++ b/drivers/mfd/exynos-lpass.c
@@ -104,11 +104,22 @@ static const struct regmap_config exynos_lpass_reg_conf = {
.fast_io = true,
};
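+/*
+ * devm action replacing the old ->remove() callback: disable runtime PM and,
+ * unless the device is already runtime-suspended, power the LPASS block down.
+ */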
+static void exynos_lpass_disable_lpass(void *data)
+{
+ struct platform_device *pdev = data;
+ struct exynos_lpass *lpass = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ exynos_lpass_disable(lpass);
+}
+
static int exynos_lpass_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct exynos_lpass *lpass;
void __iomem *base_top;
+ int ret;
lpass = devm_kzalloc(dev, sizeof(*lpass), GFP_KERNEL);
if (!lpass)
@@ -122,8 +133,8 @@ static int exynos_lpass_probe(struct platform_device *pdev)
if (IS_ERR(lpass->sfr0_clk))
return PTR_ERR(lpass->sfr0_clk);
- lpass->top = regmap_init_mmio(dev, base_top,
- &exynos_lpass_reg_conf);
+ lpass->top = devm_regmap_init_mmio(dev, base_top,
+ &exynos_lpass_reg_conf);
if (IS_ERR(lpass->top)) {
dev_err(dev, "LPASS top regmap initialization failed\n");
return PTR_ERR(lpass->top);
@@ -134,18 +145,11 @@ static int exynos_lpass_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
exynos_lpass_enable(lpass);
- return devm_of_platform_populate(dev);
-}
-
-static void exynos_lpass_remove(struct platform_device *pdev)
-{
- struct exynos_lpass *lpass = platform_get_drvdata(pdev);
+ ret = devm_add_action_or_reset(dev, exynos_lpass_disable_lpass, pdev);
+ if (ret)
+ return ret;
- exynos_lpass_disable(lpass);
- pm_runtime_disable(&pdev->dev);
- if (!pm_runtime_status_suspended(&pdev->dev))
- exynos_lpass_disable(lpass);
- regmap_exit(lpass->top);
+ return devm_of_platform_populate(dev);
}
static int __maybe_unused exynos_lpass_suspend(struct device *dev)
@@ -185,7 +189,6 @@ static struct platform_driver exynos_lpass_driver = {
.of_match_table = exynos_lpass_of_match,
},
.probe = exynos_lpass_probe,
- .remove = exynos_lpass_remove,
};
module_platform_driver(exynos_lpass_driver);
diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c
index 6fce79ec2dc6..7e7e8af9af22 100644
--- a/drivers/mfd/max14577.c
+++ b/drivers/mfd/max14577.c
@@ -456,6 +456,7 @@ static void max14577_i2c_remove(struct i2c_client *i2c)
{
struct max14577 *max14577 = i2c_get_clientdata(i2c);
+ device_init_wakeup(max14577->dev, false);
mfd_remove_devices(max14577->dev);
regmap_del_irq_chip(max14577->irq, max14577->irq_data);
if (max14577->dev_type == MAXIM_DEVICE_TYPE_MAX77836)
diff --git a/drivers/mfd/max77541.c b/drivers/mfd/max77541.c
index d77c31c86e43..f91b4f5373ce 100644
--- a/drivers/mfd/max77541.c
+++ b/drivers/mfd/max77541.c
@@ -152,7 +152,7 @@ static int max77541_pmic_setup(struct device *dev)
if (ret)
return dev_err_probe(dev, ret, "Failed to initialize IRQ\n");
- ret = device_init_wakeup(dev, true);
+ ret = devm_device_init_wakeup(dev);
if (ret)
return dev_err_probe(dev, ret, "Unable to init wakeup\n");
diff --git a/drivers/mfd/max77705.c b/drivers/mfd/max77705.c
index 60c457c21d95..6b263bacb8c2 100644
--- a/drivers/mfd/max77705.c
+++ b/drivers/mfd/max77705.c
@@ -131,7 +131,9 @@ static int max77705_i2c_probe(struct i2c_client *i2c)
if (ret)
return dev_err_probe(dev, ret, "Failed to register child devices\n");
- device_init_wakeup(dev, true);
+ ret = devm_device_init_wakeup(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init wakeup\n");
return 0;
}
diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
index 556aea7ec0a0..ab19ff0c7867 100644
--- a/drivers/mfd/max8925-i2c.c
+++ b/drivers/mfd/max8925-i2c.c
@@ -201,6 +201,7 @@ static void max8925_remove(struct i2c_client *client)
struct max8925_chip *chip = i2c_get_clientdata(client);
max8925_device_exit(chip);
+ device_init_wakeup(&client->dev, false);
i2c_unregister_device(chip->adc);
i2c_unregister_device(chip->rtc);
}
diff --git a/drivers/mfd/rohm-bd96801.c b/drivers/mfd/rohm-bd96801.c
index 60ec8db790a7..66fa017ad568 100644
--- a/drivers/mfd/rohm-bd96801.c
+++ b/drivers/mfd/rohm-bd96801.c
@@ -38,108 +38,172 @@
#include <linux/types.h>
#include <linux/mfd/rohm-bd96801.h>
+#include <linux/mfd/rohm-bd96802.h>
#include <linux/mfd/rohm-generic.h>
-static const struct resource regulator_errb_irqs[] = {
- DEFINE_RES_IRQ_NAMED(BD96801_OTP_ERR_STAT, "bd96801-otp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_DBIST_ERR_STAT, "bd96801-dbist-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_EEP_ERR_STAT, "bd96801-eep-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_ABIST_ERR_STAT, "bd96801-abist-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_PRSTB_ERR_STAT, "bd96801-prstb-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_DRMOS1_ERR_STAT, "bd96801-drmoserr1"),
- DEFINE_RES_IRQ_NAMED(BD96801_DRMOS2_ERR_STAT, "bd96801-drmoserr2"),
- DEFINE_RES_IRQ_NAMED(BD96801_SLAVE_ERR_STAT, "bd96801-slave-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_VREF_ERR_STAT, "bd96801-vref-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_TSD_ERR_STAT, "bd96801-tsd"),
- DEFINE_RES_IRQ_NAMED(BD96801_UVLO_ERR_STAT, "bd96801-uvlo-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_OVLO_ERR_STAT, "bd96801-ovlo-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_OSC_ERR_STAT, "bd96801-osc-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_PON_ERR_STAT, "bd96801-pon-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_POFF_ERR_STAT, "bd96801-poff-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_CMD_SHDN_ERR_STAT, "bd96801-cmd-shdn-err"),
+struct bd968xx {
+ const struct resource *errb_irqs;
+ const struct resource *intb_irqs;
+ int num_errb_irqs;
+ int num_intb_irqs;
+ const struct regmap_irq_chip *errb_irq_chip;
+ const struct regmap_irq_chip *intb_irq_chip;
+ const struct regmap_config *regmap_config;
+ struct mfd_cell *cells;
+ int num_cells;
+ int unlock_reg;
+ int unlock_val;
+};
+
+static const struct resource bd96801_reg_errb_irqs[] = {
+ DEFINE_RES_IRQ_NAMED(BD96801_OTP_ERR_STAT, "otp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_DBIST_ERR_STAT, "dbist-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_EEP_ERR_STAT, "eep-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_ABIST_ERR_STAT, "abist-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_PRSTB_ERR_STAT, "prstb-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_DRMOS1_ERR_STAT, "drmoserr1"),
+ DEFINE_RES_IRQ_NAMED(BD96801_DRMOS2_ERR_STAT, "drmoserr2"),
+ DEFINE_RES_IRQ_NAMED(BD96801_SLAVE_ERR_STAT, "slave-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_VREF_ERR_STAT, "vref-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_TSD_ERR_STAT, "tsd"),
+ DEFINE_RES_IRQ_NAMED(BD96801_UVLO_ERR_STAT, "uvlo-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_OVLO_ERR_STAT, "ovlo-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_OSC_ERR_STAT, "osc-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_PON_ERR_STAT, "pon-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_POFF_ERR_STAT, "poff-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_CMD_SHDN_ERR_STAT, "cmd-shdn-err"),
DEFINE_RES_IRQ_NAMED(BD96801_INT_PRSTB_WDT_ERR, "bd96801-prstb-wdt-err"),
DEFINE_RES_IRQ_NAMED(BD96801_INT_CHIP_IF_ERR, "bd96801-chip-if-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_INT_SHDN_ERR_STAT, "bd96801-int-shdn-err"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_PVIN_ERR_STAT, "bd96801-buck1-pvin-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_OVP_ERR_STAT, "bd96801-buck1-ovp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_UVP_ERR_STAT, "bd96801-buck1-uvp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_SHDN_ERR_STAT, "bd96801-buck1-shdn-err"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_PVIN_ERR_STAT, "bd96801-buck2-pvin-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_OVP_ERR_STAT, "bd96801-buck2-ovp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_UVP_ERR_STAT, "bd96801-buck2-uvp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_SHDN_ERR_STAT, "bd96801-buck2-shdn-err"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_PVIN_ERR_STAT, "bd96801-buck3-pvin-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_OVP_ERR_STAT, "bd96801-buck3-ovp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_UVP_ERR_STAT, "bd96801-buck3-uvp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_SHDN_ERR_STAT, "bd96801-buck3-shdn-err"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_PVIN_ERR_STAT, "bd96801-buck4-pvin-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_OVP_ERR_STAT, "bd96801-buck4-ovp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_UVP_ERR_STAT, "bd96801-buck4-uvp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_SHDN_ERR_STAT, "bd96801-buck4-shdn-err"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_LDO5_PVIN_ERR_STAT, "bd96801-ldo5-pvin-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO5_OVP_ERR_STAT, "bd96801-ldo5-ovp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO5_UVP_ERR_STAT, "bd96801-ldo5-uvp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO5_SHDN_ERR_STAT, "bd96801-ldo5-shdn-err"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_LDO6_PVIN_ERR_STAT, "bd96801-ldo6-pvin-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO6_OVP_ERR_STAT, "bd96801-ldo6-ovp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO6_UVP_ERR_STAT, "bd96801-ldo6-uvp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO6_SHDN_ERR_STAT, "bd96801-ldo6-shdn-err"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_LDO7_PVIN_ERR_STAT, "bd96801-ldo7-pvin-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO7_OVP_ERR_STAT, "bd96801-ldo7-ovp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO7_UVP_ERR_STAT, "bd96801-ldo7-uvp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO7_SHDN_ERR_STAT, "bd96801-ldo7-shdn-err"),
-};
-
-static const struct resource regulator_intb_irqs[] = {
- DEFINE_RES_IRQ_NAMED(BD96801_TW_STAT, "bd96801-core-thermal"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_OCPH_STAT, "bd96801-buck1-overcurr-h"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_OCPL_STAT, "bd96801-buck1-overcurr-l"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_OCPN_STAT, "bd96801-buck1-overcurr-n"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_OVD_STAT, "bd96801-buck1-overvolt"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_UVD_STAT, "bd96801-buck1-undervolt"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_TW_CH_STAT, "bd96801-buck1-thermal"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_OCPH_STAT, "bd96801-buck2-overcurr-h"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_OCPL_STAT, "bd96801-buck2-overcurr-l"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_OCPN_STAT, "bd96801-buck2-overcurr-n"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_OVD_STAT, "bd96801-buck2-overvolt"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_UVD_STAT, "bd96801-buck2-undervolt"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_TW_CH_STAT, "bd96801-buck2-thermal"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_OCPH_STAT, "bd96801-buck3-overcurr-h"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_OCPL_STAT, "bd96801-buck3-overcurr-l"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_OCPN_STAT, "bd96801-buck3-overcurr-n"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_OVD_STAT, "bd96801-buck3-overvolt"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_UVD_STAT, "bd96801-buck3-undervolt"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_TW_CH_STAT, "bd96801-buck3-thermal"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_OCPH_STAT, "bd96801-buck4-overcurr-h"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_OCPL_STAT, "bd96801-buck4-overcurr-l"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_OCPN_STAT, "bd96801-buck4-overcurr-n"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_OVD_STAT, "bd96801-buck4-overvolt"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_UVD_STAT, "bd96801-buck4-undervolt"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_TW_CH_STAT, "bd96801-buck4-thermal"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_LDO5_OCPH_STAT, "bd96801-ldo5-overcurr"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO5_OVD_STAT, "bd96801-ldo5-overvolt"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO5_UVD_STAT, "bd96801-ldo5-undervolt"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_LDO6_OCPH_STAT, "bd96801-ldo6-overcurr"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO6_OVD_STAT, "bd96801-ldo6-overvolt"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO6_UVD_STAT, "bd96801-ldo6-undervolt"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_LDO7_OCPH_STAT, "bd96801-ldo7-overcurr"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO7_OVD_STAT, "bd96801-ldo7-overvolt"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO7_UVD_STAT, "bd96801-ldo7-undervolt"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_INT_SHDN_ERR_STAT, "int-shdn-err"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_PVIN_ERR_STAT, "buck1-pvin-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_OVP_ERR_STAT, "buck1-ovp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_UVP_ERR_STAT, "buck1-uvp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_SHDN_ERR_STAT, "buck1-shdn-err"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_PVIN_ERR_STAT, "buck2-pvin-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_OVP_ERR_STAT, "buck2-ovp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_UVP_ERR_STAT, "buck2-uvp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_SHDN_ERR_STAT, "buck2-shdn-err"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_PVIN_ERR_STAT, "buck3-pvin-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_OVP_ERR_STAT, "buck3-ovp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_UVP_ERR_STAT, "buck3-uvp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_SHDN_ERR_STAT, "buck3-shdn-err"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_PVIN_ERR_STAT, "buck4-pvin-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_OVP_ERR_STAT, "buck4-ovp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_UVP_ERR_STAT, "buck4-uvp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_SHDN_ERR_STAT, "buck4-shdn-err"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO5_PVIN_ERR_STAT, "ldo5-pvin-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO5_OVP_ERR_STAT, "ldo5-ovp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO5_UVP_ERR_STAT, "ldo5-uvp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO5_SHDN_ERR_STAT, "ldo5-shdn-err"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO6_PVIN_ERR_STAT, "ldo6-pvin-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO6_OVP_ERR_STAT, "ldo6-ovp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO6_UVP_ERR_STAT, "ldo6-uvp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO6_SHDN_ERR_STAT, "ldo6-shdn-err"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO7_PVIN_ERR_STAT, "ldo7-pvin-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO7_OVP_ERR_STAT, "ldo7-ovp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO7_UVP_ERR_STAT, "ldo7-uvp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO7_SHDN_ERR_STAT, "ldo7-shdn-err"),
+};
+
+static const struct resource bd96802_reg_errb_irqs[] = {
+ DEFINE_RES_IRQ_NAMED(BD96802_OTP_ERR_STAT, "otp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_DBIST_ERR_STAT, "dbist-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_EEP_ERR_STAT, "eep-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_ABIST_ERR_STAT, "abist-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_PRSTB_ERR_STAT, "prstb-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_DRMOS1_ERR_STAT, "drmoserr1"),
+	DEFINE_RES_IRQ_NAMED(BD96802_DRMOS2_ERR_STAT, "drmoserr2"),
+ DEFINE_RES_IRQ_NAMED(BD96802_SLAVE_ERR_STAT, "slave-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_VREF_ERR_STAT, "vref-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_TSD_ERR_STAT, "tsd"),
+ DEFINE_RES_IRQ_NAMED(BD96802_UVLO_ERR_STAT, "uvlo-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_OVLO_ERR_STAT, "ovlo-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_OSC_ERR_STAT, "osc-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_PON_ERR_STAT, "pon-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_POFF_ERR_STAT, "poff-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_CMD_SHDN_ERR_STAT, "cmd-shdn-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_INT_SHDN_ERR_STAT, "int-shdn-err"),
+
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK1_PVIN_ERR_STAT, "buck1-pvin-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK1_OVP_ERR_STAT, "buck1-ovp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK1_UVP_ERR_STAT, "buck1-uvp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK1_SHDN_ERR_STAT, "buck1-shdn-err"),
+
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK2_PVIN_ERR_STAT, "buck2-pvin-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK2_OVP_ERR_STAT, "buck2-ovp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK2_UVP_ERR_STAT, "buck2-uvp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK2_SHDN_ERR_STAT, "buck2-shdn-err"),
+};
+
+static const struct resource bd96801_reg_intb_irqs[] = {
+ DEFINE_RES_IRQ_NAMED(BD96801_TW_STAT, "core-thermal"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_OCPH_STAT, "buck1-overcurr-h"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_OCPL_STAT, "buck1-overcurr-l"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_OCPN_STAT, "buck1-overcurr-n"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_OVD_STAT, "buck1-overvolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_UVD_STAT, "buck1-undervolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_TW_CH_STAT, "buck1-thermal"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_OCPH_STAT, "buck2-overcurr-h"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_OCPL_STAT, "buck2-overcurr-l"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_OCPN_STAT, "buck2-overcurr-n"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_OVD_STAT, "buck2-overvolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_UVD_STAT, "buck2-undervolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_TW_CH_STAT, "buck2-thermal"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_OCPH_STAT, "buck3-overcurr-h"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_OCPL_STAT, "buck3-overcurr-l"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_OCPN_STAT, "buck3-overcurr-n"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_OVD_STAT, "buck3-overvolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_UVD_STAT, "buck3-undervolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_TW_CH_STAT, "buck3-thermal"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_OCPH_STAT, "buck4-overcurr-h"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_OCPL_STAT, "buck4-overcurr-l"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_OCPN_STAT, "buck4-overcurr-n"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_OVD_STAT, "buck4-overvolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_UVD_STAT, "buck4-undervolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_TW_CH_STAT, "buck4-thermal"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO5_OCPH_STAT, "ldo5-overcurr"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO5_OVD_STAT, "ldo5-overvolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO5_UVD_STAT, "ldo5-undervolt"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO6_OCPH_STAT, "ldo6-overcurr"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO6_OVD_STAT, "ldo6-overvolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO6_UVD_STAT, "ldo6-undervolt"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO7_OCPH_STAT, "ldo7-overcurr"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO7_OVD_STAT, "ldo7-overvolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO7_UVD_STAT, "ldo7-undervolt"),
+};
+
+static const struct resource bd96802_reg_intb_irqs[] = {
+ DEFINE_RES_IRQ_NAMED(BD96802_TW_STAT, "core-thermal"),
+
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK1_OCPH_STAT, "buck1-overcurr-h"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK1_OCPL_STAT, "buck1-overcurr-l"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK1_OCPN_STAT, "buck1-overcurr-n"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK1_OVD_STAT, "buck1-overvolt"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK1_UVD_STAT, "buck1-undervolt"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK1_TW_CH_STAT, "buck1-thermal"),
+
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK2_OCPH_STAT, "buck2-overcurr-h"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK2_OCPL_STAT, "buck2-overcurr-l"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK2_OCPN_STAT, "buck2-overcurr-n"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK2_OVD_STAT, "buck2-overvolt"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK2_UVD_STAT, "buck2-undervolt"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK2_TW_CH_STAT, "buck2-thermal"),
};
enum {
@@ -152,6 +216,20 @@ static struct mfd_cell bd96801_cells[] = {
[REGULATOR_CELL] = { .name = "bd96801-regulator", },
};
+static struct mfd_cell bd96802_cells[] = {
+ [WDG_CELL] = { .name = "bd96801-wdt", },
+ [REGULATOR_CELL] = { .name = "bd96802-regulator", },
+};
+static struct mfd_cell bd96805_cells[] = {
+ [WDG_CELL] = { .name = "bd96801-wdt", },
+ [REGULATOR_CELL] = { .name = "bd96805-regulator", },
+};
+
+static struct mfd_cell bd96806_cells[] = {
+ [WDG_CELL] = { .name = "bd96806-wdt", },
+ [REGULATOR_CELL] = { .name = "bd96806-regulator", },
+};
+
static const struct regmap_range bd96801_volatile_ranges[] = {
/* Status registers */
regmap_reg_range(BD96801_REG_WD_FEED, BD96801_REG_WD_FAILCOUNT),
@@ -169,11 +247,28 @@ static const struct regmap_range bd96801_volatile_ranges[] = {
regmap_reg_range(BD96801_LDO5_VOL_LVL_REG, BD96801_LDO7_VOL_LVL_REG),
};
-static const struct regmap_access_table volatile_regs = {
+static const struct regmap_range bd96802_volatile_ranges[] = {
+ /* Status regs */
+ regmap_reg_range(BD96801_REG_WD_FEED, BD96801_REG_WD_FAILCOUNT),
+ regmap_reg_range(BD96801_REG_WD_ASK, BD96801_REG_WD_ASK),
+ regmap_reg_range(BD96801_REG_WD_STATUS, BD96801_REG_WD_STATUS),
+ regmap_reg_range(BD96801_REG_PMIC_STATE, BD96801_REG_INT_BUCK2_ERRB),
+ regmap_reg_range(BD96801_REG_INT_SYS_INTB, BD96801_REG_INT_BUCK2_INTB),
+ /* Registers which do not update value unless PMIC is in STBY */
+ regmap_reg_range(BD96801_REG_SSCG_CTRL, BD96801_REG_SHD_INTB),
+ regmap_reg_range(BD96801_REG_BUCK_OVP, BD96801_REG_BOOT_OVERTIME),
+};
+
+static const struct regmap_access_table bd96801_volatile_regs = {
.yes_ranges = bd96801_volatile_ranges,
.n_yes_ranges = ARRAY_SIZE(bd96801_volatile_ranges),
};
+static const struct regmap_access_table bd96802_volatile_regs = {
+ .yes_ranges = bd96802_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(bd96802_volatile_ranges),
+};
+
/*
* For ERRB we need main register bit mapping as bit(0) indicates active IRQ
* in one of the first 3 sub IRQ registers, For INTB we can use default 1 to 1
@@ -188,7 +283,7 @@ static unsigned int bit5_offsets[] = {7}; /* LDO 5 stat */
static unsigned int bit6_offsets[] = {8}; /* LDO 6 stat */
static unsigned int bit7_offsets[] = {9}; /* LDO 7 stat */
-static const struct regmap_irq_sub_irq_map errb_sub_irq_offsets[] = {
+static const struct regmap_irq_sub_irq_map bd96801_errb_sub_irq_offsets[] = {
REGMAP_IRQ_MAIN_REG_OFFSET(bit0_offsets),
REGMAP_IRQ_MAIN_REG_OFFSET(bit1_offsets),
REGMAP_IRQ_MAIN_REG_OFFSET(bit2_offsets),
@@ -199,6 +294,12 @@ static const struct regmap_irq_sub_irq_map errb_sub_irq_offsets[] = {
REGMAP_IRQ_MAIN_REG_OFFSET(bit7_offsets),
};
+static const struct regmap_irq_sub_irq_map bd96802_errb_sub_irq_offsets[] = {
+ REGMAP_IRQ_MAIN_REG_OFFSET(bit0_offsets),
+ REGMAP_IRQ_MAIN_REG_OFFSET(bit1_offsets),
+ REGMAP_IRQ_MAIN_REG_OFFSET(bit2_offsets),
+};
+
static const struct regmap_irq bd96801_errb_irqs[] = {
/* Reg 0x52 Fatal ERRB1 */
REGMAP_IRQ_REG(BD96801_OTP_ERR_STAT, 0, BD96801_OTP_ERR_MASK),
@@ -259,6 +360,39 @@ static const struct regmap_irq bd96801_errb_irqs[] = {
REGMAP_IRQ_REG(BD96801_LDO7_SHDN_ERR_STAT, 9, BD96801_OUT_SHDN_ERR_MASK),
};
+static const struct regmap_irq bd96802_errb_irqs[] = {
+ /* Reg 0x52 Fatal ERRB1 */
+ REGMAP_IRQ_REG(BD96802_OTP_ERR_STAT, 0, BD96801_OTP_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_DBIST_ERR_STAT, 0, BD96801_DBIST_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_EEP_ERR_STAT, 0, BD96801_EEP_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_ABIST_ERR_STAT, 0, BD96801_ABIST_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_PRSTB_ERR_STAT, 0, BD96801_PRSTB_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_DRMOS1_ERR_STAT, 0, BD96801_DRMOS1_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_DRMOS2_ERR_STAT, 0, BD96801_DRMOS2_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_SLAVE_ERR_STAT, 0, BD96801_SLAVE_ERR_MASK),
+ /* 0x53 Fatal ERRB2 */
+ REGMAP_IRQ_REG(BD96802_VREF_ERR_STAT, 1, BD96801_VREF_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_TSD_ERR_STAT, 1, BD96801_TSD_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_UVLO_ERR_STAT, 1, BD96801_UVLO_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_OVLO_ERR_STAT, 1, BD96801_OVLO_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_OSC_ERR_STAT, 1, BD96801_OSC_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_PON_ERR_STAT, 1, BD96801_PON_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_POFF_ERR_STAT, 1, BD96801_POFF_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_CMD_SHDN_ERR_STAT, 1, BD96801_CMD_SHDN_ERR_MASK),
+ /* 0x54 Fatal INTB shadowed to ERRB */
+ REGMAP_IRQ_REG(BD96802_INT_SHDN_ERR_STAT, 2, BD96801_INT_SHDN_ERR_MASK),
+ /* Reg 0x55 BUCK1 ERR IRQs */
+ REGMAP_IRQ_REG(BD96802_BUCK1_PVIN_ERR_STAT, 3, BD96801_OUT_PVIN_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK1_OVP_ERR_STAT, 3, BD96801_OUT_OVP_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK1_UVP_ERR_STAT, 3, BD96801_OUT_UVP_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK1_SHDN_ERR_STAT, 3, BD96801_OUT_SHDN_ERR_MASK),
+ /* Reg 0x56 BUCK2 ERR IRQs */
+ REGMAP_IRQ_REG(BD96802_BUCK2_PVIN_ERR_STAT, 4, BD96801_OUT_PVIN_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK2_OVP_ERR_STAT, 4, BD96801_OUT_OVP_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK2_UVP_ERR_STAT, 4, BD96801_OUT_UVP_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK2_SHDN_ERR_STAT, 4, BD96801_OUT_SHDN_ERR_MASK),
+};
+
static const struct regmap_irq bd96801_intb_irqs[] = {
/* STATUS SYSTEM INTB */
REGMAP_IRQ_REG(BD96801_TW_STAT, 0, BD96801_TW_STAT_MASK),
@@ -307,6 +441,69 @@ static const struct regmap_irq bd96801_intb_irqs[] = {
REGMAP_IRQ_REG(BD96801_LDO7_UVD_STAT, 7, BD96801_LDO_UVD_STAT_MASK),
};
+static const struct regmap_irq bd96802_intb_irqs[] = {
+ /* STATUS SYSTEM INTB */
+ REGMAP_IRQ_REG(BD96802_TW_STAT, 0, BD96801_TW_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_WDT_ERR_STAT, 0, BD96801_WDT_ERR_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_I2C_ERR_STAT, 0, BD96801_I2C_ERR_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_CHIP_IF_ERR_STAT, 0, BD96801_CHIP_IF_ERR_STAT_MASK),
+ /* STATUS BUCK1 INTB */
+ REGMAP_IRQ_REG(BD96802_BUCK1_OCPH_STAT, 1, BD96801_BUCK_OCPH_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK1_OCPL_STAT, 1, BD96801_BUCK_OCPL_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK1_OCPN_STAT, 1, BD96801_BUCK_OCPN_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK1_OVD_STAT, 1, BD96801_BUCK_OVD_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK1_UVD_STAT, 1, BD96801_BUCK_UVD_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK1_TW_CH_STAT, 1, BD96801_BUCK_TW_CH_STAT_MASK),
+ /* BUCK 2 INTB */
+ REGMAP_IRQ_REG(BD96802_BUCK2_OCPH_STAT, 2, BD96801_BUCK_OCPH_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK2_OCPL_STAT, 2, BD96801_BUCK_OCPL_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK2_OCPN_STAT, 2, BD96801_BUCK_OCPN_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK2_OVD_STAT, 2, BD96801_BUCK_OVD_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK2_UVD_STAT, 2, BD96801_BUCK_UVD_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK2_TW_CH_STAT, 2, BD96801_BUCK_TW_CH_STAT_MASK),
+};
+
+/*
+ * The IRQ stuff is a bit hairy. The BD96801 / BD96802 provide two physical
+ * IRQ lines called INTB and ERRB. They share the same main status register.
+ *
+ * For ERRB, mapping from main status to sub-status is such that the
+ * 'global' faults are mapped to the first 3 sub-status registers - and indicated
+ * by the first bit[0] in main status reg.
+ *
+ * The rest of the status registers indicate events for individual
+ * regulators, 1 sub register / regulator and 1 main status register bit /
+ * regulator, starting from bit[1].
+ *
+ * E.g., regulator-specific status has a 1 to 1 mapping from main-status to sub
+ * registers but 'global' ERRB IRQs require mapping from main status bit[0] to
+ * 3 status registers.
+ *
+ * Furthermore, the BD96801 has 7 regulators whereas the BD96802 has only 2.
+ *
+ * INTB has only 1 sub status register for 'global' events and then its
+ * own sub status register for each of the regulators. So, for INTB we
+ * have a direct 1 to 1 mapping - the BD96801 just has 5 registers and
+ * 5 main status bits more than the BD96802.
+ *
+ * Sharing the main status bits could be a problem if we had both INTB and
+ * ERRB IRQs asserted but for different sub-status offsets. This might lead
+ * the IRQ controller code to read a sub status register which indicates no
+ * active IRQs. I assume this occurring repeatedly might lead the IRQ to be
+ * disabled by the core as a result of repeatedly returned IRQ_NONEs.
+ *
+ * I don't consider this as a fatal problem for now because:
+ * a) Having ERRB asserted leads to PMIC fault state which will kill
+ * the SoC powered by the PMIC. (So, relevant only for potential
+ * case of not powering the processor with this PMIC).
+ * b) Having ERRB set without having respective INTB is unlikely
+ * (haven't actually verified this).
+ *
+ * So, let's proceed with main status enabled for both INTB and ERRB. We can
+ * later disable main-status usage on systems where this ever proves to be
+ * a problem.
+ */
+
static const struct regmap_irq_chip bd96801_irq_chip_errb = {
.name = "bd96801-irq-errb",
.domain_suffix = "errb",
@@ -320,7 +517,23 @@ static const struct regmap_irq_chip bd96801_irq_chip_errb = {
.init_ack_masked = true,
.num_regs = 10,
.irq_reg_stride = 1,
- .sub_reg_offsets = &errb_sub_irq_offsets[0],
+ .sub_reg_offsets = &bd96801_errb_sub_irq_offsets[0],
+};
+
+static const struct regmap_irq_chip bd96802_irq_chip_errb = {
+ .name = "bd96802-irq-errb",
+ .domain_suffix = "errb",
+ .main_status = BD96801_REG_INT_MAIN,
+ .num_main_regs = 1,
+ .irqs = &bd96802_errb_irqs[0],
+ .num_irqs = ARRAY_SIZE(bd96802_errb_irqs),
+ .status_base = BD96801_REG_INT_SYS_ERRB1,
+ .mask_base = BD96801_REG_MASK_SYS_ERRB,
+ .ack_base = BD96801_REG_INT_SYS_ERRB1,
+ .init_ack_masked = true,
+ .num_regs = 5,
+ .irq_reg_stride = 1,
+ .sub_reg_offsets = &bd96802_errb_sub_irq_offsets[0],
};
static const struct regmap_irq_chip bd96801_irq_chip_intb = {
@@ -338,25 +551,124 @@ static const struct regmap_irq_chip bd96801_irq_chip_intb = {
.irq_reg_stride = 1,
};
+static const struct regmap_irq_chip bd96802_irq_chip_intb = {
+ .name = "bd96802-irq-intb",
+ .domain_suffix = "intb",
+ .main_status = BD96801_REG_INT_MAIN,
+ .num_main_regs = 1,
+ .irqs = &bd96802_intb_irqs[0],
+ .num_irqs = ARRAY_SIZE(bd96802_intb_irqs),
+ .status_base = BD96801_REG_INT_SYS_INTB,
+ .mask_base = BD96801_REG_MASK_SYS_INTB,
+ .ack_base = BD96801_REG_INT_SYS_INTB,
+ .init_ack_masked = true,
+ .num_regs = 3,
+ .irq_reg_stride = 1,
+};
+
static const struct regmap_config bd96801_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .volatile_table = &volatile_regs,
+ .volatile_table = &bd96801_volatile_regs,
+ .cache_type = REGCACHE_MAPLE,
+};
+
+static const struct regmap_config bd96802_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .volatile_table = &bd96802_volatile_regs,
.cache_type = REGCACHE_MAPLE,
};
+static const struct bd968xx bd96801_data = {
+ .errb_irqs = bd96801_reg_errb_irqs,
+ .intb_irqs = bd96801_reg_intb_irqs,
+ .num_errb_irqs = ARRAY_SIZE(bd96801_reg_errb_irqs),
+ .num_intb_irqs = ARRAY_SIZE(bd96801_reg_intb_irqs),
+ .errb_irq_chip = &bd96801_irq_chip_errb,
+ .intb_irq_chip = &bd96801_irq_chip_intb,
+ .regmap_config = &bd96801_regmap_config,
+ .cells = bd96801_cells,
+ .num_cells = ARRAY_SIZE(bd96801_cells),
+ .unlock_reg = BD96801_LOCK_REG,
+ .unlock_val = BD96801_UNLOCK,
+};
+
+static const struct bd968xx bd96802_data = {
+ .errb_irqs = bd96802_reg_errb_irqs,
+ .intb_irqs = bd96802_reg_intb_irqs,
+ .num_errb_irqs = ARRAY_SIZE(bd96802_reg_errb_irqs),
+ .num_intb_irqs = ARRAY_SIZE(bd96802_reg_intb_irqs),
+ .errb_irq_chip = &bd96802_irq_chip_errb,
+ .intb_irq_chip = &bd96802_irq_chip_intb,
+ .regmap_config = &bd96802_regmap_config,
+ .cells = bd96802_cells,
+ .num_cells = ARRAY_SIZE(bd96802_cells),
+ .unlock_reg = BD96801_LOCK_REG,
+ .unlock_val = BD96801_UNLOCK,
+};
+
+static const struct bd968xx bd96805_data = {
+ .errb_irqs = bd96801_reg_errb_irqs,
+ .intb_irqs = bd96801_reg_intb_irqs,
+ .num_errb_irqs = ARRAY_SIZE(bd96801_reg_errb_irqs),
+ .num_intb_irqs = ARRAY_SIZE(bd96801_reg_intb_irqs),
+ .errb_irq_chip = &bd96801_irq_chip_errb,
+ .intb_irq_chip = &bd96801_irq_chip_intb,
+ .regmap_config = &bd96801_regmap_config,
+ .cells = bd96805_cells,
+ .num_cells = ARRAY_SIZE(bd96805_cells),
+ .unlock_reg = BD96801_LOCK_REG,
+ .unlock_val = BD96801_UNLOCK,
+};
+
+static const struct bd968xx bd96806_data = {
+ .errb_irqs = bd96802_reg_errb_irqs,
+ .intb_irqs = bd96802_reg_intb_irqs,
+ .num_errb_irqs = ARRAY_SIZE(bd96802_reg_errb_irqs),
+ .num_intb_irqs = ARRAY_SIZE(bd96802_reg_intb_irqs),
+ .errb_irq_chip = &bd96802_irq_chip_errb,
+ .intb_irq_chip = &bd96802_irq_chip_intb,
+ .regmap_config = &bd96802_regmap_config,
+ .cells = bd96806_cells,
+ .num_cells = ARRAY_SIZE(bd96806_cells),
+ .unlock_reg = BD96801_LOCK_REG,
+ .unlock_val = BD96801_UNLOCK,
+};
+
static int bd96801_i2c_probe(struct i2c_client *i2c)
{
struct regmap_irq_chip_data *intb_irq_data, *errb_irq_data;
struct irq_domain *intb_domain, *errb_domain;
+ const struct bd968xx *ddata;
const struct fwnode_handle *fwnode;
struct resource *regulator_res;
struct resource wdg_irq;
struct regmap *regmap;
- int intb_irq, errb_irq, num_intb, num_errb = 0;
+ int intb_irq, errb_irq, num_errb = 0;
int num_regu_irqs, wdg_irq_no;
+ unsigned int chip_type;
int i, ret;
+ chip_type = (unsigned int)(uintptr_t)device_get_match_data(&i2c->dev);
+ switch (chip_type) {
+ case ROHM_CHIP_TYPE_BD96801:
+ ddata = &bd96801_data;
+ break;
+ case ROHM_CHIP_TYPE_BD96802:
+ ddata = &bd96802_data;
+ break;
+ case ROHM_CHIP_TYPE_BD96805:
+ ddata = &bd96805_data;
+ break;
+ case ROHM_CHIP_TYPE_BD96806:
+ ddata = &bd96806_data;
+ break;
+ default:
+ dev_err(&i2c->dev, "Unknown IC\n");
+ return -EINVAL;
+ }
+
fwnode = dev_fwnode(&i2c->dev);
if (!fwnode)
return dev_err_probe(&i2c->dev, -EINVAL, "Failed to find fwnode\n");
@@ -365,34 +677,32 @@ static int bd96801_i2c_probe(struct i2c_client *i2c)
if (intb_irq < 0)
return dev_err_probe(&i2c->dev, intb_irq, "INTB IRQ not configured\n");
- num_intb = ARRAY_SIZE(regulator_intb_irqs);
-
/* ERRB may be omitted if processor is powered by the PMIC */
errb_irq = fwnode_irq_get_byname(fwnode, "errb");
- if (errb_irq < 0)
- errb_irq = 0;
+ if (errb_irq == -EPROBE_DEFER)
+ return errb_irq;
- if (errb_irq)
- num_errb = ARRAY_SIZE(regulator_errb_irqs);
+ if (errb_irq > 0)
+ num_errb = ddata->num_errb_irqs;
- num_regu_irqs = num_intb + num_errb;
+ num_regu_irqs = ddata->num_intb_irqs + num_errb;
regulator_res = devm_kcalloc(&i2c->dev, num_regu_irqs,
sizeof(*regulator_res), GFP_KERNEL);
if (!regulator_res)
return -ENOMEM;
- regmap = devm_regmap_init_i2c(i2c, &bd96801_regmap_config);
+ regmap = devm_regmap_init_i2c(i2c, ddata->regmap_config);
if (IS_ERR(regmap))
return dev_err_probe(&i2c->dev, PTR_ERR(regmap),
"Regmap initialization failed\n");
- ret = regmap_write(regmap, BD96801_LOCK_REG, BD96801_UNLOCK);
+ ret = regmap_write(regmap, ddata->unlock_reg, ddata->unlock_val);
if (ret)
return dev_err_probe(&i2c->dev, ret, "Failed to unlock PMIC\n");
ret = devm_regmap_add_irq_chip(&i2c->dev, regmap, intb_irq,
- IRQF_ONESHOT, 0, &bd96801_irq_chip_intb,
+ IRQF_ONESHOT, 0, ddata->intb_irq_chip,
&intb_irq_data);
if (ret)
return dev_err_probe(&i2c->dev, ret, "Failed to add INTB IRQ chip\n");
@@ -404,24 +714,25 @@ static int bd96801_i2c_probe(struct i2c_client *i2c)
* has two domains so we do IRQ mapping here and provide the
* already mapped IRQ numbers to sub-devices.
*/
- for (i = 0; i < num_intb; i++) {
+ for (i = 0; i < ddata->num_intb_irqs; i++) {
struct resource *res = &regulator_res[i];
- *res = regulator_intb_irqs[i];
+ *res = ddata->intb_irqs[i];
res->start = res->end = irq_create_mapping(intb_domain,
res->start);
}
wdg_irq_no = irq_create_mapping(intb_domain, BD96801_WDT_ERR_STAT);
wdg_irq = DEFINE_RES_IRQ_NAMED(wdg_irq_no, "bd96801-wdg");
- bd96801_cells[WDG_CELL].resources = &wdg_irq;
- bd96801_cells[WDG_CELL].num_resources = 1;
+
+ ddata->cells[WDG_CELL].resources = &wdg_irq;
+ ddata->cells[WDG_CELL].num_resources = 1;
if (!num_errb)
goto skip_errb;
ret = devm_regmap_add_irq_chip(&i2c->dev, regmap, errb_irq, IRQF_ONESHOT,
- 0, &bd96801_irq_chip_errb, &errb_irq_data);
+ 0, ddata->errb_irq_chip, &errb_irq_data);
if (ret)
return dev_err_probe(&i2c->dev, ret,
"Failed to add ERRB IRQ chip\n");
@@ -429,18 +740,17 @@ static int bd96801_i2c_probe(struct i2c_client *i2c)
errb_domain = regmap_irq_get_domain(errb_irq_data);
for (i = 0; i < num_errb; i++) {
- struct resource *res = &regulator_res[num_intb + i];
+ struct resource *res = &regulator_res[ddata->num_intb_irqs + i];
- *res = regulator_errb_irqs[i];
+ *res = ddata->errb_irqs[i];
res->start = res->end = irq_create_mapping(errb_domain, res->start);
}
skip_errb:
- bd96801_cells[REGULATOR_CELL].resources = regulator_res;
- bd96801_cells[REGULATOR_CELL].num_resources = num_regu_irqs;
-
- ret = devm_mfd_add_devices(&i2c->dev, PLATFORM_DEVID_AUTO, bd96801_cells,
- ARRAY_SIZE(bd96801_cells), NULL, 0, NULL);
+ ddata->cells[REGULATOR_CELL].resources = regulator_res;
+ ddata->cells[REGULATOR_CELL].num_resources = num_regu_irqs;
+ ret = devm_mfd_add_devices(&i2c->dev, PLATFORM_DEVID_AUTO, ddata->cells,
+ ddata->num_cells, NULL, 0, NULL);
if (ret)
dev_err_probe(&i2c->dev, ret, "Failed to create subdevices\n");
@@ -448,7 +758,10 @@ skip_errb:
}
static const struct of_device_id bd96801_of_match[] = {
- { .compatible = "rohm,bd96801", },
+ { .compatible = "rohm,bd96801", .data = (void *)ROHM_CHIP_TYPE_BD96801 },
+ { .compatible = "rohm,bd96802", .data = (void *)ROHM_CHIP_TYPE_BD96802 },
+ { .compatible = "rohm,bd96805", .data = (void *)ROHM_CHIP_TYPE_BD96805 },
+ { .compatible = "rohm,bd96806", .data = (void *)ROHM_CHIP_TYPE_BD96806 },
{ }
};
MODULE_DEVICE_TABLE(of, bd96801_of_match);
@@ -476,5 +789,5 @@ static void __exit bd96801_i2c_exit(void)
module_exit(bd96801_i2c_exit);
MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
-MODULE_DESCRIPTION("ROHM BD96801 Power Management IC driver");
+MODULE_DESCRIPTION("ROHM BD9680X Power Management IC driver");
MODULE_LICENSE("GPL");
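The probe conversion above hinges on packing an enum into the of_device_id .data pointer and recovering it with device_get_match_data(). A minimal, self-contained sketch of that mechanism follows; every "example_" and "vendor," identifier is hypothetical and only illustrates the pattern, it is not part of this driver.

#include <linux/dev_printk.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/property.h>
#include <linux/types.h>

enum example_chip { EXAMPLE_CHIP_A = 1, EXAMPLE_CHIP_B };

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,chip-a", .data = (void *)EXAMPLE_CHIP_A },
	{ .compatible = "vendor,chip-b", .data = (void *)EXAMPLE_CHIP_B },
	{ }
};

static int example_pick_variant(struct device *dev)
{
	/* Same double cast as above: the enum travels inside the pointer. */
	unsigned int chip = (unsigned int)(uintptr_t)device_get_match_data(dev);

	switch (chip) {
	case EXAMPLE_CHIP_A:
	case EXAMPLE_CHIP_B:
		return 0;	/* select the matching per-variant data here */
	default:
		dev_err(dev, "Unknown IC\n");
		return -EINVAL;
	}
}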
diff --git a/drivers/mfd/rt5033.c b/drivers/mfd/rt5033.c
index 84ebc96f58e4..2204bf1c5a51 100644
--- a/drivers/mfd/rt5033.c
+++ b/drivers/mfd/rt5033.c
@@ -98,7 +98,11 @@ static int rt5033_i2c_probe(struct i2c_client *i2c)
return ret;
}
- device_init_wakeup(rt5033->dev, rt5033->wakeup);
+ if (rt5033->wakeup) {
+ ret = devm_device_init_wakeup(rt5033->dev);
+ if (ret)
+ return dev_err_probe(rt5033->dev, ret, "Failed to init wakeup\n");
+ }
return 0;
}
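For reference, the managed wakeup pattern the hunk above switches to can be sketched as below. devm_device_init_wakeup() arranges the matching teardown on unbind, so no remove-path code is needed; the function name and flag here are placeholders.

static int example_enable_wakeup(struct device *dev, bool wakeup)
{
	if (!wakeup)
		return 0;

	/* Managed: wakeup state is torn down automatically on unbind. */
	return devm_device_init_wakeup(dev);
}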
diff --git a/drivers/mfd/sec-acpm.c b/drivers/mfd/sec-acpm.c
new file mode 100644
index 000000000000..8b31c816d65b
--- /dev/null
+++ b/drivers/mfd/sec-acpm.c
@@ -0,0 +1,442 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2020 Google Inc
+ * Copyright 2025 Linaro Ltd.
+ *
+ * Samsung S2MPG1x ACPM driver
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/firmware/samsung/exynos-acpm-protocol.h>
+#include <linux/mfd/samsung/core.h>
+#include <linux/mfd/samsung/rtc.h>
+#include <linux/mfd/samsung/s2mpg10.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include "sec-core.h"
+
+#define ACPM_ADDR_BITS 8
+#define ACPM_MAX_BULK_DATA 8
+
+struct sec_pmic_acpm_platform_data {
+ int device_type;
+
+ unsigned int acpm_chan_id;
+ u8 speedy_channel;
+
+ const struct regmap_config *regmap_cfg_common;
+ const struct regmap_config *regmap_cfg_pmic;
+ const struct regmap_config *regmap_cfg_rtc;
+ const struct regmap_config *regmap_cfg_meter;
+};
+
+static const struct regmap_range s2mpg10_common_registers[] = {
+ regmap_reg_range(0x00, 0x02), /* CHIP_ID_M, INT, INT_MASK */
+ regmap_reg_range(0x0a, 0x0c), /* Speedy control */
+ regmap_reg_range(0x1a, 0x2a), /* Debug */
+};
+
+static const struct regmap_range s2mpg10_common_ro_registers[] = {
+ regmap_reg_range(0x00, 0x01), /* CHIP_ID_M, INT */
+ regmap_reg_range(0x28, 0x2a), /* Debug */
+};
+
+static const struct regmap_range s2mpg10_common_nonvolatile_registers[] = {
+ regmap_reg_range(0x00, 0x00), /* CHIP_ID_M */
+ regmap_reg_range(0x02, 0x02), /* INT_MASK */
+ regmap_reg_range(0x0a, 0x0c), /* Speedy control */
+};
+
+static const struct regmap_range s2mpg10_common_precious_registers[] = {
+ regmap_reg_range(0x01, 0x01), /* INT */
+};
+
+static const struct regmap_access_table s2mpg10_common_wr_table = {
+ .yes_ranges = s2mpg10_common_registers,
+ .n_yes_ranges = ARRAY_SIZE(s2mpg10_common_registers),
+ .no_ranges = s2mpg10_common_ro_registers,
+ .n_no_ranges = ARRAY_SIZE(s2mpg10_common_ro_registers),
+};
+
+static const struct regmap_access_table s2mpg10_common_rd_table = {
+ .yes_ranges = s2mpg10_common_registers,
+ .n_yes_ranges = ARRAY_SIZE(s2mpg10_common_registers),
+};
+
+static const struct regmap_access_table s2mpg10_common_volatile_table = {
+ .no_ranges = s2mpg10_common_nonvolatile_registers,
+ .n_no_ranges = ARRAY_SIZE(s2mpg10_common_nonvolatile_registers),
+};
+
+static const struct regmap_access_table s2mpg10_common_precious_table = {
+ .yes_ranges = s2mpg10_common_precious_registers,
+ .n_yes_ranges = ARRAY_SIZE(s2mpg10_common_precious_registers),
+};
+
+static const struct regmap_config s2mpg10_regmap_config_common = {
+ .name = "common",
+ .reg_bits = ACPM_ADDR_BITS,
+ .val_bits = 8,
+ .max_register = S2MPG10_COMMON_SPD_DEBUG4,
+ .wr_table = &s2mpg10_common_wr_table,
+ .rd_table = &s2mpg10_common_rd_table,
+ .volatile_table = &s2mpg10_common_volatile_table,
+ .precious_table = &s2mpg10_common_precious_table,
+ .num_reg_defaults_raw = S2MPG10_COMMON_SPD_DEBUG4 + 1,
+ .cache_type = REGCACHE_FLAT,
+};
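The four tables above slice the same register space along independent axes: wr_table (writable, with the read-only ranges excluded via no_ranges), rd_table (readable), volatile_table (only no_ranges is populated, which makes every unlisted register volatile and thus never served from the cache) and precious_table (reads with side effects, which the core must not issue speculatively). A reduced sketch of the shape, with made-up register numbers:

static const struct regmap_range example_rw_ranges[] = {
	regmap_reg_range(0x00, 0x0f),
};

static const struct regmap_range example_ro_ranges[] = {
	regmap_reg_range(0x00, 0x01),	/* e.g. ID/status registers */
};

static const struct regmap_access_table example_wr_table = {
	.yes_ranges = example_rw_ranges,
	.n_yes_ranges = ARRAY_SIZE(example_rw_ranges),
	.no_ranges = example_ro_ranges,	/* readable but never written */
	.n_no_ranges = ARRAY_SIZE(example_ro_ranges),
};

static const struct regmap_config example_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = 0x0f,
	.wr_table = &example_wr_table,
	.cache_type = REGCACHE_FLAT,
};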
+
+static const struct regmap_range s2mpg10_pmic_registers[] = {
+ regmap_reg_range(0x00, 0xf6), /* All PMIC registers */
+};
+
+static const struct regmap_range s2mpg10_pmic_ro_registers[] = {
+ regmap_reg_range(0x00, 0x05), /* INTx */
+ regmap_reg_range(0x0c, 0x0f), /* STATUSx PWRONSRC OFFSRC */
+ regmap_reg_range(0xc7, 0xc7), /* GPIO input */
+};
+
+static const struct regmap_range s2mpg10_pmic_nonvolatile_registers[] = {
+ regmap_reg_range(0x06, 0x0b), /* INTxM */
+};
+
+static const struct regmap_range s2mpg10_pmic_precious_registers[] = {
+ regmap_reg_range(0x00, 0x05), /* INTx */
+};
+
+static const struct regmap_access_table s2mpg10_pmic_wr_table = {
+ .yes_ranges = s2mpg10_pmic_registers,
+ .n_yes_ranges = ARRAY_SIZE(s2mpg10_pmic_registers),
+ .no_ranges = s2mpg10_pmic_ro_registers,
+ .n_no_ranges = ARRAY_SIZE(s2mpg10_pmic_ro_registers),
+};
+
+static const struct regmap_access_table s2mpg10_pmic_rd_table = {
+ .yes_ranges = s2mpg10_pmic_registers,
+ .n_yes_ranges = ARRAY_SIZE(s2mpg10_pmic_registers),
+};
+
+static const struct regmap_access_table s2mpg10_pmic_volatile_table = {
+ .no_ranges = s2mpg10_pmic_nonvolatile_registers,
+ .n_no_ranges = ARRAY_SIZE(s2mpg10_pmic_nonvolatile_registers),
+};
+
+static const struct regmap_access_table s2mpg10_pmic_precious_table = {
+ .yes_ranges = s2mpg10_pmic_precious_registers,
+ .n_yes_ranges = ARRAY_SIZE(s2mpg10_pmic_precious_registers),
+};
+
+static const struct regmap_config s2mpg10_regmap_config_pmic = {
+ .name = "pmic",
+ .reg_bits = ACPM_ADDR_BITS,
+ .val_bits = 8,
+ .max_register = S2MPG10_PMIC_LDO_SENSE4,
+ .wr_table = &s2mpg10_pmic_wr_table,
+ .rd_table = &s2mpg10_pmic_rd_table,
+ .volatile_table = &s2mpg10_pmic_volatile_table,
+ .precious_table = &s2mpg10_pmic_precious_table,
+ .num_reg_defaults_raw = S2MPG10_PMIC_LDO_SENSE4 + 1,
+ .cache_type = REGCACHE_FLAT,
+};
+
+static const struct regmap_range s2mpg10_rtc_registers[] = {
+ regmap_reg_range(0x00, 0x2b), /* All RTC registers */
+};
+
+static const struct regmap_range s2mpg10_rtc_volatile_registers[] = {
+ regmap_reg_range(0x01, 0x01), /* RTC_UPDATE */
+ regmap_reg_range(0x05, 0x0c), /* Time / date */
+};
+
+static const struct regmap_access_table s2mpg10_rtc_rd_table = {
+ .yes_ranges = s2mpg10_rtc_registers,
+ .n_yes_ranges = ARRAY_SIZE(s2mpg10_rtc_registers),
+};
+
+static const struct regmap_access_table s2mpg10_rtc_volatile_table = {
+ .yes_ranges = s2mpg10_rtc_volatile_registers,
+ .n_yes_ranges = ARRAY_SIZE(s2mpg10_rtc_volatile_registers),
+};
+
+static const struct regmap_config s2mpg10_regmap_config_rtc = {
+ .name = "rtc",
+ .reg_bits = ACPM_ADDR_BITS,
+ .val_bits = 8,
+ .max_register = S2MPG10_RTC_OSC_CTRL,
+ .rd_table = &s2mpg10_rtc_rd_table,
+ .volatile_table = &s2mpg10_rtc_volatile_table,
+ .num_reg_defaults_raw = S2MPG10_RTC_OSC_CTRL + 1,
+ .cache_type = REGCACHE_FLAT,
+};
+
+static const struct regmap_range s2mpg10_meter_registers[] = {
+ regmap_reg_range(0x00, 0x21), /* Meter config */
+ regmap_reg_range(0x40, 0x8a), /* Meter data */
+ regmap_reg_range(0xee, 0xee), /* Offset */
+ regmap_reg_range(0xf1, 0xf1), /* Trim */
+};
+
+static const struct regmap_range s2mpg10_meter_ro_registers[] = {
+ regmap_reg_range(0x40, 0x8a), /* Meter data */
+};
+
+static const struct regmap_access_table s2mpg10_meter_wr_table = {
+ .yes_ranges = s2mpg10_meter_registers,
+ .n_yes_ranges = ARRAY_SIZE(s2mpg10_meter_registers),
+ .no_ranges = s2mpg10_meter_ro_registers,
+ .n_no_ranges = ARRAY_SIZE(s2mpg10_meter_ro_registers),
+};
+
+static const struct regmap_access_table s2mpg10_meter_rd_table = {
+ .yes_ranges = s2mpg10_meter_registers,
+ .n_yes_ranges = ARRAY_SIZE(s2mpg10_meter_registers),
+};
+
+static const struct regmap_access_table s2mpg10_meter_volatile_table = {
+ .yes_ranges = s2mpg10_meter_ro_registers,
+ .n_yes_ranges = ARRAY_SIZE(s2mpg10_meter_ro_registers),
+};
+
+static const struct regmap_config s2mpg10_regmap_config_meter = {
+ .name = "meter",
+ .reg_bits = ACPM_ADDR_BITS,
+ .val_bits = 8,
+ .max_register = S2MPG10_METER_BUCK_METER_TRIM3,
+ .wr_table = &s2mpg10_meter_wr_table,
+ .rd_table = &s2mpg10_meter_rd_table,
+ .volatile_table = &s2mpg10_meter_volatile_table,
+ .num_reg_defaults_raw = S2MPG10_METER_BUCK_METER_TRIM3 + 1,
+ .cache_type = REGCACHE_FLAT,
+};
+
+struct sec_pmic_acpm_shared_bus_context {
+ const struct acpm_handle *acpm;
+ unsigned int acpm_chan_id;
+ u8 speedy_channel;
+};
+
+enum sec_pmic_acpm_accesstype {
+ SEC_PMIC_ACPM_ACCESSTYPE_COMMON = 0x00,
+ SEC_PMIC_ACPM_ACCESSTYPE_PMIC = 0x01,
+ SEC_PMIC_ACPM_ACCESSTYPE_RTC = 0x02,
+ SEC_PMIC_ACPM_ACCESSTYPE_METER = 0x0a,
+ SEC_PMIC_ACPM_ACCESSTYPE_WLWP = 0x0b,
+ SEC_PMIC_ACPM_ACCESSTYPE_TRIM = 0x0f,
+};
+
+struct sec_pmic_acpm_bus_context {
+ struct sec_pmic_acpm_shared_bus_context *shared;
+ enum sec_pmic_acpm_accesstype type;
+};
+
+static int sec_pmic_acpm_bus_write(void *context, const void *data,
+ size_t count)
+{
+ struct sec_pmic_acpm_bus_context *ctx = context;
+ const struct acpm_handle *acpm = ctx->shared->acpm;
+ const struct acpm_pmic_ops *pmic_ops = &acpm->ops.pmic_ops;
+ size_t val_count = count - BITS_TO_BYTES(ACPM_ADDR_BITS);
+ const u8 *d = data;
+ const u8 *vals = &d[BITS_TO_BYTES(ACPM_ADDR_BITS)];
+ u8 reg;
+
+ if (val_count < 1 || val_count > ACPM_MAX_BULK_DATA)
+ return -EINVAL;
+
+ reg = d[0];
+
+ return pmic_ops->bulk_write(acpm, ctx->shared->acpm_chan_id, ctx->type, reg,
+ ctx->shared->speedy_channel, val_count, vals);
+}
+
+static int sec_pmic_acpm_bus_read(void *context, const void *reg_buf, size_t reg_size,
+ void *val_buf, size_t val_size)
+{
+ struct sec_pmic_acpm_bus_context *ctx = context;
+ const struct acpm_handle *acpm = ctx->shared->acpm;
+ const struct acpm_pmic_ops *pmic_ops = &acpm->ops.pmic_ops;
+ const u8 *r = reg_buf;
+ u8 reg;
+
+ if (reg_size != BITS_TO_BYTES(ACPM_ADDR_BITS) || !val_size ||
+ val_size > ACPM_MAX_BULK_DATA)
+ return -EINVAL;
+
+ reg = r[0];
+
+ return pmic_ops->bulk_read(acpm, ctx->shared->acpm_chan_id, ctx->type, reg,
+ ctx->shared->speedy_channel, val_size, val_buf);
+}
+
+static int sec_pmic_acpm_bus_reg_update_bits(void *context, unsigned int reg, unsigned int mask,
+ unsigned int val)
+{
+ struct sec_pmic_acpm_bus_context *ctx = context;
+ const struct acpm_handle *acpm = ctx->shared->acpm;
+ const struct acpm_pmic_ops *pmic_ops = &acpm->ops.pmic_ops;
+
+ return pmic_ops->update_reg(acpm, ctx->shared->acpm_chan_id, ctx->type, reg & 0xff,
+ ctx->shared->speedy_channel, val, mask);
+}
+
+static const struct regmap_bus sec_pmic_acpm_regmap_bus = {
+ .write = sec_pmic_acpm_bus_write,
+ .read = sec_pmic_acpm_bus_read,
+ .reg_update_bits = sec_pmic_acpm_bus_reg_update_bits,
+ .max_raw_read = ACPM_MAX_BULK_DATA,
+ .max_raw_write = ACPM_MAX_BULK_DATA,
+};
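The .write path above relies on the regmap wire format for a bus-backed map: with an 8-bit address, the core hands the callback a single buffer whose first byte is the register and whose remainder is the payload, which is why the function splits d[0] from the following val_count bytes. A sketch of a call that exercises it (register and values arbitrary):

static int example_meter_write(struct regmap *regmap)
{
	u8 vals[2] = { 0x12, 0x34 };

	/*
	 * The core serialises this as { 0x20, 0x12, 0x34 } and invokes
	 * sec_pmic_acpm_bus_write(ctx, buf, 3); the driver then forwards
	 * the two value bytes to the ACPM firmware starting at 0x20.
	 */
	return regmap_bulk_write(regmap, 0x20, vals, ARRAY_SIZE(vals));
}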
+
+static struct regmap *sec_pmic_acpm_regmap_init(struct device *dev,
+ struct sec_pmic_acpm_shared_bus_context *shared_ctx,
+ enum sec_pmic_acpm_accesstype type,
+ const struct regmap_config *cfg, bool do_attach)
+{
+ struct sec_pmic_acpm_bus_context *ctx;
+ struct regmap *regmap;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ ctx->shared = shared_ctx;
+ ctx->type = type;
+
+ regmap = devm_regmap_init(dev, &sec_pmic_acpm_regmap_bus, ctx, cfg);
+ if (IS_ERR(regmap))
+ return dev_err_cast_probe(dev, regmap, "regmap init (%s) failed\n", cfg->name);
+
+ if (do_attach) {
+ int ret;
+
+ ret = regmap_attach_dev(dev, regmap, cfg);
+ if (ret)
+ return dev_err_ptr_probe(dev, ret, "regmap attach (%s) failed\n",
+ cfg->name);
+ }
+
+ return regmap;
+}
+
+static void sec_pmic_acpm_mask_common_irqs(void *regmap_common)
+{
+ regmap_write(regmap_common, S2MPG10_COMMON_INT_MASK, S2MPG10_COMMON_INT_SRC);
+}
+
+static int sec_pmic_acpm_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap_common, *regmap_pmic, *regmap;
+ const struct sec_pmic_acpm_platform_data *pdata;
+ struct sec_pmic_acpm_shared_bus_context *shared_ctx;
+ const struct acpm_handle *acpm;
+ struct device *dev = &pdev->dev;
+ int ret, irq;
+
+ pdata = device_get_match_data(dev);
+ if (!pdata)
+ return dev_err_probe(dev, -ENODEV, "unsupported device type\n");
+
+ acpm = devm_acpm_get_by_node(dev, dev->parent->of_node);
+ if (IS_ERR(acpm))
+ return dev_err_probe(dev, PTR_ERR(acpm), "failed to get acpm\n");
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ shared_ctx = devm_kzalloc(dev, sizeof(*shared_ctx), GFP_KERNEL);
+ if (!shared_ctx)
+ return -ENOMEM;
+
+ shared_ctx->acpm = acpm;
+ shared_ctx->acpm_chan_id = pdata->acpm_chan_id;
+ shared_ctx->speedy_channel = pdata->speedy_channel;
+
+ regmap_common = sec_pmic_acpm_regmap_init(dev, shared_ctx, SEC_PMIC_ACPM_ACCESSTYPE_COMMON,
+ pdata->regmap_cfg_common, false);
+ if (IS_ERR(regmap_common))
+ return PTR_ERR(regmap_common);
+
+ /* Mask all interrupts from 'common' block, until successful init */
+ ret = regmap_write(regmap_common, S2MPG10_COMMON_INT_MASK, S2MPG10_COMMON_INT_SRC);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to mask common block interrupts\n");
+
+ regmap_pmic = sec_pmic_acpm_regmap_init(dev, shared_ctx, SEC_PMIC_ACPM_ACCESSTYPE_PMIC,
+ pdata->regmap_cfg_pmic, false);
+ if (IS_ERR(regmap_pmic))
+ return PTR_ERR(regmap_pmic);
+
+ regmap = sec_pmic_acpm_regmap_init(dev, shared_ctx, SEC_PMIC_ACPM_ACCESSTYPE_RTC,
+ pdata->regmap_cfg_rtc, true);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ regmap = sec_pmic_acpm_regmap_init(dev, shared_ctx, SEC_PMIC_ACPM_ACCESSTYPE_METER,
+ pdata->regmap_cfg_meter, true);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ret = sec_pmic_probe(dev, pdata->device_type, irq, regmap_pmic, NULL);
+ if (ret)
+ return ret;
+
+ if (device_property_read_bool(dev, "wakeup-source"))
+ devm_device_init_wakeup(dev);
+
+ /* Unmask PMIC interrupt from 'common' block, now that everything is in place. */
+ ret = regmap_clear_bits(regmap_common, S2MPG10_COMMON_INT_MASK,
+ S2MPG10_COMMON_INT_SRC_PMIC);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to unmask PMIC interrupt\n");
+
+ /* Mask all interrupts from 'common' block on shutdown */
+ ret = devm_add_action_or_reset(dev, sec_pmic_acpm_mask_common_irqs, regmap_common);
+ if (ret)
+ return ret;
+
+ return 0;
+}
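Note the interrupt bring-up order in the probe above: everything in the 'common' block is masked before any handler exists, only the PMIC source is unmasked once sec_pmic_probe() has registered the IRQ chip, and a devm action re-masks on unbind. The undo half of that pattern, reduced to its essentials (names hypothetical):

static void example_mask_on_unbind(void *data)
{
	struct regmap *regmap = data;

	regmap_write(regmap, S2MPG10_COMMON_INT_MASK, S2MPG10_COMMON_INT_SRC);
}

static int example_arm(struct device *dev, struct regmap *regmap)
{
	/* Runs example_mask_on_unbind() automatically when dev unbinds. */
	return devm_add_action_or_reset(dev, example_mask_on_unbind, regmap);
}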
+
+static void sec_pmic_acpm_shutdown(struct platform_device *pdev)
+{
+ sec_pmic_shutdown(&pdev->dev);
+}
+
+static const struct sec_pmic_acpm_platform_data s2mpg10_data = {
+ .device_type = S2MPG10,
+ .acpm_chan_id = 2,
+ .speedy_channel = 0,
+ .regmap_cfg_common = &s2mpg10_regmap_config_common,
+ .regmap_cfg_pmic = &s2mpg10_regmap_config_pmic,
+ .regmap_cfg_rtc = &s2mpg10_regmap_config_rtc,
+ .regmap_cfg_meter = &s2mpg10_regmap_config_meter,
+};
+
+static const struct of_device_id sec_pmic_acpm_of_match[] = {
+ { .compatible = "samsung,s2mpg10-pmic", .data = &s2mpg10_data, },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sec_pmic_acpm_of_match);
+
+static struct platform_driver sec_pmic_acpm_driver = {
+ .driver = {
+ .name = "sec-pmic-acpm",
+ .pm = pm_sleep_ptr(&sec_pmic_pm_ops),
+ .of_match_table = sec_pmic_acpm_of_match,
+ },
+ .probe = sec_pmic_acpm_probe,
+ .shutdown = sec_pmic_acpm_shutdown,
+};
+module_platform_driver(sec_pmic_acpm_driver);
+
+MODULE_AUTHOR("André Draszik <andre.draszik@linaro.org>");
+MODULE_DESCRIPTION("ACPM driver for the Samsung S2MPG1x");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/sec-common.c b/drivers/mfd/sec-common.c
new file mode 100644
index 000000000000..42d55e70e34c
--- /dev/null
+++ b/drivers/mfd/sec-common.c
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2012 Samsung Electronics Co., Ltd
+ * http://www.samsung.com
+ * Copyright 2025 Linaro Ltd.
+ *
+ * Samsung SxM core driver
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/samsung/core.h>
+#include <linux/mfd/samsung/irq.h>
+#include <linux/mfd/samsung/s2mps11.h>
+#include <linux/mfd/samsung/s2mps13.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include "sec-core.h"
+
+static const struct mfd_cell s5m8767_devs[] = {
+ MFD_CELL_NAME("s5m8767-pmic"),
+ MFD_CELL_NAME("s5m-rtc"),
+ MFD_CELL_OF("s5m8767-clk", NULL, NULL, 0, 0, "samsung,s5m8767-clk"),
+};
+
+static const struct mfd_cell s2dos05_devs[] = {
+ MFD_CELL_NAME("s2dos05-regulator"),
+};
+
+static const struct mfd_cell s2mpg10_devs[] = {
+ MFD_CELL_NAME("s2mpg10-meter"),
+ MFD_CELL_NAME("s2mpg10-regulator"),
+ MFD_CELL_NAME("s2mpg10-rtc"),
+ MFD_CELL_OF("s2mpg10-clk", NULL, NULL, 0, 0, "samsung,s2mpg10-clk"),
+ MFD_CELL_OF("s2mpg10-gpio", NULL, NULL, 0, 0, "samsung,s2mpg10-gpio"),
+};
+
+static const struct mfd_cell s2mps11_devs[] = {
+ MFD_CELL_NAME("s2mps11-regulator"),
+ MFD_CELL_NAME("s2mps14-rtc"),
+ MFD_CELL_OF("s2mps11-clk", NULL, NULL, 0, 0, "samsung,s2mps11-clk"),
+};
+
+static const struct mfd_cell s2mps13_devs[] = {
+ MFD_CELL_NAME("s2mps13-regulator"),
+ MFD_CELL_NAME("s2mps13-rtc"),
+ MFD_CELL_OF("s2mps13-clk", NULL, NULL, 0, 0, "samsung,s2mps13-clk"),
+};
+
+static const struct mfd_cell s2mps14_devs[] = {
+ MFD_CELL_NAME("s2mps14-regulator"),
+ MFD_CELL_NAME("s2mps14-rtc"),
+ MFD_CELL_OF("s2mps14-clk", NULL, NULL, 0, 0, "samsung,s2mps14-clk"),
+};
+
+static const struct mfd_cell s2mps15_devs[] = {
+ MFD_CELL_NAME("s2mps15-regulator"),
+ MFD_CELL_NAME("s2mps15-rtc"),
+ MFD_CELL_OF("s2mps13-clk", NULL, NULL, 0, 0, "samsung,s2mps13-clk"),
+};
+
+static const struct mfd_cell s2mpa01_devs[] = {
+ MFD_CELL_NAME("s2mpa01-pmic"),
+ MFD_CELL_NAME("s2mps14-rtc"),
+};
+
+static const struct mfd_cell s2mpu02_devs[] = {
+ MFD_CELL_NAME("s2mpu02-regulator"),
+};
+
+static const struct mfd_cell s2mpu05_devs[] = {
+ MFD_CELL_NAME("s2mpu05-regulator"),
+ MFD_CELL_NAME("s2mps15-rtc"),
+};
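The MFD_CELL_NAME()/MFD_CELL_OF() helpers used above are initialiser shorthands from <linux/mfd/core.h>; expanded by hand (simplified, the unused resource/pdata/id fields omitted), the s2mpg10 clock cell is equivalent to:

static const struct mfd_cell example_cells[] = {
	{ .name = "s2mpg10-meter" },			/* MFD_CELL_NAME() */
	{
		.name = "s2mpg10-clk",			/* MFD_CELL_OF() */
		.of_compatible = "samsung,s2mpg10-clk",	/* DT child match */
	},
};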
+
+static void sec_pmic_dump_rev(struct sec_pmic_dev *sec_pmic)
+{
+ unsigned int val;
+
+ /* For s2mpg1x, the revision is in a different regmap */
+ if (sec_pmic->device_type == S2MPG10)
+ return;
+
+ /* For each device type, the REG_ID is always the first register */
+ if (!regmap_read(sec_pmic->regmap_pmic, S2MPS11_REG_ID, &val))
+ dev_dbg(sec_pmic->dev, "Revision: 0x%x\n", val);
+}
+
+static void sec_pmic_configure(struct sec_pmic_dev *sec_pmic)
+{
+ int err;
+
+ if (sec_pmic->device_type != S2MPS13X)
+ return;
+
+ if (sec_pmic->pdata->disable_wrstbi) {
+ /*
+ * If WRSTBI pin is pulled down this feature must be disabled
+ * because each Suspend to RAM will trigger buck voltage reset
+ * to default values.
+ */
+ err = regmap_update_bits(sec_pmic->regmap_pmic,
+ S2MPS13_REG_WRSTBI,
+ S2MPS13_REG_WRSTBI_MASK, 0x0);
+ if (err)
+ dev_warn(sec_pmic->dev,
+ "Cannot initialize WRSTBI config: %d\n",
+ err);
+ }
+}
+
+/*
+ * Only the common platform data elements for s5m8767 are parsed here from the
+ * device tree. Other sub-modules of s5m8767, such as pmic, rtc, charger and
+ * others, have to parse their own platform data elements from the device tree.
+ *
+ * The s5m8767 platform data structure is instantiated here and the drivers for
+ * the sub-modules need not instantiate another instance while parsing their
+ * platform data.
+ */
+static struct sec_platform_data *
+sec_pmic_parse_dt_pdata(struct device *dev)
+{
+ struct sec_platform_data *pd;
+
+ pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
+
+ pd->manual_poweroff = of_property_read_bool(dev->of_node,
+ "samsung,s2mps11-acokb-ground");
+ pd->disable_wrstbi = of_property_read_bool(dev->of_node,
+ "samsung,s2mps11-wrstbi-ground");
+ return pd;
+}
+
+int sec_pmic_probe(struct device *dev, int device_type, unsigned int irq,
+ struct regmap *regmap, struct i2c_client *client)
+{
+ struct sec_platform_data *pdata;
+ const struct mfd_cell *sec_devs;
+ struct sec_pmic_dev *sec_pmic;
+ int ret, num_sec_devs;
+
+ sec_pmic = devm_kzalloc(dev, sizeof(*sec_pmic), GFP_KERNEL);
+ if (!sec_pmic)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, sec_pmic);
+ sec_pmic->dev = dev;
+ sec_pmic->device_type = device_type;
+ sec_pmic->i2c = client;
+ sec_pmic->irq = irq;
+ sec_pmic->regmap_pmic = regmap;
+
+ pdata = sec_pmic_parse_dt_pdata(sec_pmic->dev);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ return ret;
+ }
+
+ sec_pmic->pdata = pdata;
+
+ ret = sec_irq_init(sec_pmic);
+ if (ret)
+ return ret;
+
+ pm_runtime_set_active(sec_pmic->dev);
+
+ switch (sec_pmic->device_type) {
+ case S5M8767X:
+ sec_devs = s5m8767_devs;
+ num_sec_devs = ARRAY_SIZE(s5m8767_devs);
+ break;
+ case S2DOS05:
+ sec_devs = s2dos05_devs;
+ num_sec_devs = ARRAY_SIZE(s2dos05_devs);
+ break;
+ case S2MPA01:
+ sec_devs = s2mpa01_devs;
+ num_sec_devs = ARRAY_SIZE(s2mpa01_devs);
+ break;
+ case S2MPG10:
+ sec_devs = s2mpg10_devs;
+ num_sec_devs = ARRAY_SIZE(s2mpg10_devs);
+ break;
+ case S2MPS11X:
+ sec_devs = s2mps11_devs;
+ num_sec_devs = ARRAY_SIZE(s2mps11_devs);
+ break;
+ case S2MPS13X:
+ sec_devs = s2mps13_devs;
+ num_sec_devs = ARRAY_SIZE(s2mps13_devs);
+ break;
+ case S2MPS14X:
+ sec_devs = s2mps14_devs;
+ num_sec_devs = ARRAY_SIZE(s2mps14_devs);
+ break;
+ case S2MPS15X:
+ sec_devs = s2mps15_devs;
+ num_sec_devs = ARRAY_SIZE(s2mps15_devs);
+ break;
+ case S2MPU02:
+ sec_devs = s2mpu02_devs;
+ num_sec_devs = ARRAY_SIZE(s2mpu02_devs);
+ break;
+ case S2MPU05:
+ sec_devs = s2mpu05_devs;
+ num_sec_devs = ARRAY_SIZE(s2mpu05_devs);
+ break;
+ default:
+ return dev_err_probe(sec_pmic->dev, -EINVAL,
+ "Unsupported device type %d\n",
+ sec_pmic->device_type);
+ }
+ ret = devm_mfd_add_devices(sec_pmic->dev, -1, sec_devs, num_sec_devs,
+ NULL, 0, NULL);
+ if (ret)
+ return ret;
+
+ sec_pmic_configure(sec_pmic);
+ sec_pmic_dump_rev(sec_pmic);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sec_pmic_probe);
+
+void sec_pmic_shutdown(struct device *dev)
+{
+ struct sec_pmic_dev *sec_pmic = dev_get_drvdata(dev);
+ unsigned int reg, mask;
+
+ if (!sec_pmic->pdata->manual_poweroff)
+ return;
+
+ switch (sec_pmic->device_type) {
+ case S2MPS11X:
+ reg = S2MPS11_REG_CTRL1;
+ mask = S2MPS11_CTRL1_PWRHOLD_MASK;
+ break;
+ default:
+ /*
+ * Currently only one board with S2MPS11 needs this, so just
+ * ignore the rest.
+ */
+ dev_warn(sec_pmic->dev,
+ "Unsupported device %d for manual power off\n",
+ sec_pmic->device_type);
+ return;
+ }
+
+ regmap_update_bits(sec_pmic->regmap_pmic, reg, mask, 0);
+}
+EXPORT_SYMBOL_GPL(sec_pmic_shutdown);
+
+static int sec_pmic_suspend(struct device *dev)
+{
+ struct sec_pmic_dev *sec_pmic = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(sec_pmic->irq);
+	/*
+	 * The PMIC IRQ must be disabled during suspend for the RTC alarm
+	 * to work properly: when the device wakes from suspend, an
+	 * interrupt can occur before the I2C bus controller has resumed.
+	 * That interrupt is handled by regmap_irq_thread(), which tries
+	 * to read the RTC registers. The read fails (I2C is still
+	 * suspended) and the RTC alarm interrupt ends up disabled.
+	 */
+ disable_irq(sec_pmic->irq);
+
+ return 0;
+}
+
+static int sec_pmic_resume(struct device *dev)
+{
+ struct sec_pmic_dev *sec_pmic = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(sec_pmic->irq);
+ enable_irq(sec_pmic->irq);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_DEV_PM_OPS(sec_pmic_pm_ops, sec_pmic_suspend, sec_pmic_resume);
+EXPORT_SYMBOL_GPL(sec_pmic_pm_ops);
+
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
+MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
+MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
+MODULE_AUTHOR("André Draszik <andre.draszik@linaro.org>");
+MODULE_DESCRIPTION("Core driver for the Samsung S5M");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
deleted file mode 100644
index 3e9b65c988a7..000000000000
--- a/drivers/mfd/sec-core.c
+++ /dev/null
@@ -1,481 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-//
-// Copyright (c) 2012 Samsung Electronics Co., Ltd
-// http://www.samsung.com
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
-#include <linux/of.h>
-#include <linux/interrupt.h>
-#include <linux/pm_runtime.h>
-#include <linux/mutex.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/samsung/core.h>
-#include <linux/mfd/samsung/irq.h>
-#include <linux/mfd/samsung/s2mpa01.h>
-#include <linux/mfd/samsung/s2mps11.h>
-#include <linux/mfd/samsung/s2mps13.h>
-#include <linux/mfd/samsung/s2mps14.h>
-#include <linux/mfd/samsung/s2mps15.h>
-#include <linux/mfd/samsung/s2mpu02.h>
-#include <linux/mfd/samsung/s5m8767.h>
-#include <linux/regmap.h>
-
-static const struct mfd_cell s5m8767_devs[] = {
- { .name = "s5m8767-pmic", },
- { .name = "s5m-rtc", },
- {
- .name = "s5m8767-clk",
- .of_compatible = "samsung,s5m8767-clk",
- },
-};
-
-static const struct mfd_cell s2dos05_devs[] = {
- { .name = "s2dos05-regulator", },
-};
-
-static const struct mfd_cell s2mps11_devs[] = {
- { .name = "s2mps11-regulator", },
- { .name = "s2mps14-rtc", },
- {
- .name = "s2mps11-clk",
- .of_compatible = "samsung,s2mps11-clk",
- },
-};
-
-static const struct mfd_cell s2mps13_devs[] = {
- { .name = "s2mps13-regulator", },
- { .name = "s2mps13-rtc", },
- {
- .name = "s2mps13-clk",
- .of_compatible = "samsung,s2mps13-clk",
- },
-};
-
-static const struct mfd_cell s2mps14_devs[] = {
- { .name = "s2mps14-regulator", },
- { .name = "s2mps14-rtc", },
- {
- .name = "s2mps14-clk",
- .of_compatible = "samsung,s2mps14-clk",
- },
-};
-
-static const struct mfd_cell s2mps15_devs[] = {
- { .name = "s2mps15-regulator", },
- { .name = "s2mps15-rtc", },
- {
- .name = "s2mps13-clk",
- .of_compatible = "samsung,s2mps13-clk",
- },
-};
-
-static const struct mfd_cell s2mpa01_devs[] = {
- { .name = "s2mpa01-pmic", },
- { .name = "s2mps14-rtc", },
-};
-
-static const struct mfd_cell s2mpu02_devs[] = {
- { .name = "s2mpu02-regulator", },
-};
-
-static const struct mfd_cell s2mpu05_devs[] = {
- { .name = "s2mpu05-regulator", },
- { .name = "s2mps15-rtc", },
-};
-
-static const struct of_device_id sec_dt_match[] = {
- {
- .compatible = "samsung,s5m8767-pmic",
- .data = (void *)S5M8767X,
- }, {
- .compatible = "samsung,s2dos05",
- .data = (void *)S2DOS05,
- }, {
- .compatible = "samsung,s2mps11-pmic",
- .data = (void *)S2MPS11X,
- }, {
- .compatible = "samsung,s2mps13-pmic",
- .data = (void *)S2MPS13X,
- }, {
- .compatible = "samsung,s2mps14-pmic",
- .data = (void *)S2MPS14X,
- }, {
- .compatible = "samsung,s2mps15-pmic",
- .data = (void *)S2MPS15X,
- }, {
- .compatible = "samsung,s2mpa01-pmic",
- .data = (void *)S2MPA01,
- }, {
- .compatible = "samsung,s2mpu02-pmic",
- .data = (void *)S2MPU02,
- }, {
- .compatible = "samsung,s2mpu05-pmic",
- .data = (void *)S2MPU05,
- }, {
- /* Sentinel */
- },
-};
-MODULE_DEVICE_TABLE(of, sec_dt_match);
-
-static bool s2mpa01_volatile(struct device *dev, unsigned int reg)
-{
- switch (reg) {
- case S2MPA01_REG_INT1M:
- case S2MPA01_REG_INT2M:
- case S2MPA01_REG_INT3M:
- return false;
- default:
- return true;
- }
-}
-
-static bool s2mps11_volatile(struct device *dev, unsigned int reg)
-{
- switch (reg) {
- case S2MPS11_REG_INT1M:
- case S2MPS11_REG_INT2M:
- case S2MPS11_REG_INT3M:
- return false;
- default:
- return true;
- }
-}
-
-static bool s2mpu02_volatile(struct device *dev, unsigned int reg)
-{
- switch (reg) {
- case S2MPU02_REG_INT1M:
- case S2MPU02_REG_INT2M:
- case S2MPU02_REG_INT3M:
- return false;
- default:
- return true;
- }
-}
-
-static const struct regmap_config sec_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-};
-
-static const struct regmap_config s2mpa01_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-
- .max_register = S2MPA01_REG_LDO_OVCB4,
- .volatile_reg = s2mpa01_volatile,
- .cache_type = REGCACHE_FLAT,
-};
-
-static const struct regmap_config s2mps11_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-
- .max_register = S2MPS11_REG_L38CTRL,
- .volatile_reg = s2mps11_volatile,
- .cache_type = REGCACHE_FLAT,
-};
-
-static const struct regmap_config s2mps13_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-
- .max_register = S2MPS13_REG_LDODSCH5,
- .volatile_reg = s2mps11_volatile,
- .cache_type = REGCACHE_FLAT,
-};
-
-static const struct regmap_config s2mps14_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-
- .max_register = S2MPS14_REG_LDODSCH3,
- .volatile_reg = s2mps11_volatile,
- .cache_type = REGCACHE_FLAT,
-};
-
-static const struct regmap_config s2mps15_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-
- .max_register = S2MPS15_REG_LDODSCH4,
- .volatile_reg = s2mps11_volatile,
- .cache_type = REGCACHE_FLAT,
-};
-
-static const struct regmap_config s2mpu02_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-
- .max_register = S2MPU02_REG_DVSDATA,
- .volatile_reg = s2mpu02_volatile,
- .cache_type = REGCACHE_FLAT,
-};
-
-static const struct regmap_config s5m8767_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-
- .max_register = S5M8767_REG_LDO28CTRL,
- .volatile_reg = s2mps11_volatile,
- .cache_type = REGCACHE_FLAT,
-};
-
-static void sec_pmic_dump_rev(struct sec_pmic_dev *sec_pmic)
-{
- unsigned int val;
-
- /* For each device type, the REG_ID is always the first register */
- if (!regmap_read(sec_pmic->regmap_pmic, S2MPS11_REG_ID, &val))
- dev_dbg(sec_pmic->dev, "Revision: 0x%x\n", val);
-}
-
-static void sec_pmic_configure(struct sec_pmic_dev *sec_pmic)
-{
- int err;
-
- if (sec_pmic->device_type != S2MPS13X)
- return;
-
- if (sec_pmic->pdata->disable_wrstbi) {
- /*
- * If WRSTBI pin is pulled down this feature must be disabled
- * because each Suspend to RAM will trigger buck voltage reset
- * to default values.
- */
- err = regmap_update_bits(sec_pmic->regmap_pmic,
- S2MPS13_REG_WRSTBI,
- S2MPS13_REG_WRSTBI_MASK, 0x0);
- if (err)
- dev_warn(sec_pmic->dev,
- "Cannot initialize WRSTBI config: %d\n",
- err);
- }
-}
-
-/*
- * Only the common platform data elements for s5m8767 are parsed here from the
- * device tree. Other sub-modules of s5m8767 such as pmic, rtc , charger and
- * others have to parse their own platform data elements from device tree.
- *
- * The s5m8767 platform data structure is instantiated here and the drivers for
- * the sub-modules need not instantiate another instance while parsing their
- * platform data.
- */
-static struct sec_platform_data *
-sec_pmic_i2c_parse_dt_pdata(struct device *dev)
-{
- struct sec_platform_data *pd;
-
- pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
- if (!pd)
- return ERR_PTR(-ENOMEM);
-
- pd->manual_poweroff = of_property_read_bool(dev->of_node,
- "samsung,s2mps11-acokb-ground");
- pd->disable_wrstbi = of_property_read_bool(dev->of_node,
- "samsung,s2mps11-wrstbi-ground");
- return pd;
-}
-
-static int sec_pmic_probe(struct i2c_client *i2c)
-{
- const struct regmap_config *regmap;
- struct sec_platform_data *pdata;
- const struct mfd_cell *sec_devs;
- struct sec_pmic_dev *sec_pmic;
- int ret, num_sec_devs;
-
- sec_pmic = devm_kzalloc(&i2c->dev, sizeof(struct sec_pmic_dev),
- GFP_KERNEL);
- if (sec_pmic == NULL)
- return -ENOMEM;
-
- i2c_set_clientdata(i2c, sec_pmic);
- sec_pmic->dev = &i2c->dev;
- sec_pmic->i2c = i2c;
- sec_pmic->irq = i2c->irq;
-
- pdata = sec_pmic_i2c_parse_dt_pdata(sec_pmic->dev);
- if (IS_ERR(pdata)) {
- ret = PTR_ERR(pdata);
- return ret;
- }
-
- sec_pmic->device_type = (unsigned long)of_device_get_match_data(sec_pmic->dev);
- sec_pmic->pdata = pdata;
-
- switch (sec_pmic->device_type) {
- case S2MPA01:
- regmap = &s2mpa01_regmap_config;
- break;
- case S2MPS11X:
- regmap = &s2mps11_regmap_config;
- break;
- case S2MPS13X:
- regmap = &s2mps13_regmap_config;
- break;
- case S2MPS14X:
- regmap = &s2mps14_regmap_config;
- break;
- case S2MPS15X:
- regmap = &s2mps15_regmap_config;
- break;
- case S5M8767X:
- regmap = &s5m8767_regmap_config;
- break;
- case S2MPU02:
- regmap = &s2mpu02_regmap_config;
- break;
- default:
- regmap = &sec_regmap_config;
- break;
- }
-
- sec_pmic->regmap_pmic = devm_regmap_init_i2c(i2c, regmap);
- if (IS_ERR(sec_pmic->regmap_pmic)) {
- ret = PTR_ERR(sec_pmic->regmap_pmic);
- dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
- ret);
- return ret;
- }
-
- sec_irq_init(sec_pmic);
-
- pm_runtime_set_active(sec_pmic->dev);
-
- switch (sec_pmic->device_type) {
- case S5M8767X:
- sec_devs = s5m8767_devs;
- num_sec_devs = ARRAY_SIZE(s5m8767_devs);
- break;
- case S2DOS05:
- sec_devs = s2dos05_devs;
- num_sec_devs = ARRAY_SIZE(s2dos05_devs);
- break;
- case S2MPA01:
- sec_devs = s2mpa01_devs;
- num_sec_devs = ARRAY_SIZE(s2mpa01_devs);
- break;
- case S2MPS11X:
- sec_devs = s2mps11_devs;
- num_sec_devs = ARRAY_SIZE(s2mps11_devs);
- break;
- case S2MPS13X:
- sec_devs = s2mps13_devs;
- num_sec_devs = ARRAY_SIZE(s2mps13_devs);
- break;
- case S2MPS14X:
- sec_devs = s2mps14_devs;
- num_sec_devs = ARRAY_SIZE(s2mps14_devs);
- break;
- case S2MPS15X:
- sec_devs = s2mps15_devs;
- num_sec_devs = ARRAY_SIZE(s2mps15_devs);
- break;
- case S2MPU02:
- sec_devs = s2mpu02_devs;
- num_sec_devs = ARRAY_SIZE(s2mpu02_devs);
- break;
- case S2MPU05:
- sec_devs = s2mpu05_devs;
- num_sec_devs = ARRAY_SIZE(s2mpu05_devs);
- break;
- default:
- dev_err(&i2c->dev, "Unsupported device type (%lu)\n",
- sec_pmic->device_type);
- return -ENODEV;
- }
- ret = devm_mfd_add_devices(sec_pmic->dev, -1, sec_devs, num_sec_devs,
- NULL, 0, NULL);
- if (ret)
- return ret;
-
- sec_pmic_configure(sec_pmic);
- sec_pmic_dump_rev(sec_pmic);
-
- return ret;
-}
-
-static void sec_pmic_shutdown(struct i2c_client *i2c)
-{
- struct sec_pmic_dev *sec_pmic = i2c_get_clientdata(i2c);
- unsigned int reg, mask;
-
- if (!sec_pmic->pdata->manual_poweroff)
- return;
-
- switch (sec_pmic->device_type) {
- case S2MPS11X:
- reg = S2MPS11_REG_CTRL1;
- mask = S2MPS11_CTRL1_PWRHOLD_MASK;
- break;
- default:
- /*
- * Currently only one board with S2MPS11 needs this, so just
- * ignore the rest.
- */
- dev_warn(sec_pmic->dev,
- "Unsupported device %lu for manual power off\n",
- sec_pmic->device_type);
- return;
- }
-
- regmap_update_bits(sec_pmic->regmap_pmic, reg, mask, 0);
-}
-
-static int sec_pmic_suspend(struct device *dev)
-{
- struct i2c_client *i2c = to_i2c_client(dev);
- struct sec_pmic_dev *sec_pmic = i2c_get_clientdata(i2c);
-
- if (device_may_wakeup(dev))
- enable_irq_wake(sec_pmic->irq);
- /*
- * PMIC IRQ must be disabled during suspend for RTC alarm
- * to work properly.
- * When device is woken up from suspend, an
- * interrupt occurs before resuming I2C bus controller.
- * The interrupt is handled by regmap_irq_thread which tries
- * to read RTC registers. This read fails (I2C is still
- * suspended) and RTC Alarm interrupt is disabled.
- */
- disable_irq(sec_pmic->irq);
-
- return 0;
-}
-
-static int sec_pmic_resume(struct device *dev)
-{
- struct i2c_client *i2c = to_i2c_client(dev);
- struct sec_pmic_dev *sec_pmic = i2c_get_clientdata(i2c);
-
- if (device_may_wakeup(dev))
- disable_irq_wake(sec_pmic->irq);
- enable_irq(sec_pmic->irq);
-
- return 0;
-}
-
-static DEFINE_SIMPLE_DEV_PM_OPS(sec_pmic_pm_ops,
- sec_pmic_suspend, sec_pmic_resume);
-
-static struct i2c_driver sec_pmic_driver = {
- .driver = {
- .name = "sec_pmic",
- .pm = pm_sleep_ptr(&sec_pmic_pm_ops),
- .of_match_table = sec_dt_match,
- },
- .probe = sec_pmic_probe,
- .shutdown = sec_pmic_shutdown,
-};
-module_i2c_driver(sec_pmic_driver);
-
-MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
-MODULE_DESCRIPTION("Core support for the S5M MFD");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/sec-core.h b/drivers/mfd/sec-core.h
new file mode 100644
index 000000000000..92c7558ab8b0
--- /dev/null
+++ b/drivers/mfd/sec-core.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2012 Samsung Electronics Co., Ltd
+ * http://www.samsung.com
+ * Copyright 2025 Linaro Ltd.
+ *
+ * Samsung SxM core driver internal data
+ */
+
+#ifndef __SEC_CORE_INT_H
+#define __SEC_CORE_INT_H
+
+struct i2c_client;
+
+extern const struct dev_pm_ops sec_pmic_pm_ops;
+
+int sec_pmic_probe(struct device *dev, int device_type, unsigned int irq,
+ struct regmap *regmap, struct i2c_client *client);
+void sec_pmic_shutdown(struct device *dev);
+
+int sec_irq_init(struct sec_pmic_dev *sec_pmic);
+
+#endif /* __SEC_CORE_INT_H */
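This header is the whole contract between the shared core (sec-common.c) and the transports (sec-i2c.c, sec-acpm.c): a transport builds its regmap and resolves its IRQ, then hands off. A hypothetical third transport would reduce to something like the sketch below (bus name and device type chosen arbitrarily, not a real kernel API):

static int example_transport_probe(struct device *dev, struct regmap *regmap,
				   unsigned int irq)
{
	/* device_type would normally come from match data, as in sec-i2c.c */
	return sec_pmic_probe(dev, S2MPS11X, irq, regmap, NULL);
}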
diff --git a/drivers/mfd/sec-i2c.c b/drivers/mfd/sec-i2c.c
new file mode 100644
index 000000000000..3132b849b4bc
--- /dev/null
+++ b/drivers/mfd/sec-i2c.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2012 Samsung Electronics Co., Ltd
+ * http://www.samsung.com
+ * Copyright 2025 Linaro Ltd.
+ *
+ * Samsung SxM I2C driver
+ */
+
+#include <linux/dev_printk.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/mfd/samsung/core.h>
+#include <linux/mfd/samsung/s2mpa01.h>
+#include <linux/mfd/samsung/s2mps11.h>
+#include <linux/mfd/samsung/s2mps13.h>
+#include <linux/mfd/samsung/s2mps14.h>
+#include <linux/mfd/samsung/s2mps15.h>
+#include <linux/mfd/samsung/s2mpu02.h>
+#include <linux/mfd/samsung/s5m8767.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include "sec-core.h"
+
+struct sec_pmic_i2c_platform_data {
+ const struct regmap_config *regmap_cfg;
+ int device_type;
+};
+
+static bool s2mpa01_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case S2MPA01_REG_INT1M:
+ case S2MPA01_REG_INT2M:
+ case S2MPA01_REG_INT3M:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static bool s2mps11_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case S2MPS11_REG_INT1M:
+ case S2MPS11_REG_INT2M:
+ case S2MPS11_REG_INT3M:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static bool s2mpu02_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case S2MPU02_REG_INT1M:
+ case S2MPU02_REG_INT2M:
+ case S2MPU02_REG_INT3M:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static const struct regmap_config s2dos05_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static const struct regmap_config s2mpa01_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = S2MPA01_REG_LDO_OVCB4,
+ .volatile_reg = s2mpa01_volatile,
+ .cache_type = REGCACHE_FLAT,
+};
+
+static const struct regmap_config s2mps11_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = S2MPS11_REG_L38CTRL,
+ .volatile_reg = s2mps11_volatile,
+ .cache_type = REGCACHE_FLAT,
+};
+
+static const struct regmap_config s2mps13_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = S2MPS13_REG_LDODSCH5,
+ .volatile_reg = s2mps11_volatile,
+ .cache_type = REGCACHE_FLAT,
+};
+
+static const struct regmap_config s2mps14_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = S2MPS14_REG_LDODSCH3,
+ .volatile_reg = s2mps11_volatile,
+ .cache_type = REGCACHE_FLAT,
+};
+
+static const struct regmap_config s2mps15_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = S2MPS15_REG_LDODSCH4,
+ .volatile_reg = s2mps11_volatile,
+ .cache_type = REGCACHE_FLAT,
+};
+
+static const struct regmap_config s2mpu02_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = S2MPU02_REG_DVSDATA,
+ .volatile_reg = s2mpu02_volatile,
+ .cache_type = REGCACHE_FLAT,
+};
+
+static const struct regmap_config s2mpu05_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static const struct regmap_config s5m8767_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = S5M8767_REG_LDO28CTRL,
+ .volatile_reg = s2mps11_volatile,
+ .cache_type = REGCACHE_FLAT,
+};
+
+static int sec_pmic_i2c_probe(struct i2c_client *client)
+{
+ const struct sec_pmic_i2c_platform_data *pdata;
+ struct regmap *regmap_pmic;
+
+ pdata = device_get_match_data(&client->dev);
+ if (!pdata)
+ return dev_err_probe(&client->dev, -ENODEV,
+ "Unsupported device type\n");
+
+ regmap_pmic = devm_regmap_init_i2c(client, pdata->regmap_cfg);
+ if (IS_ERR(regmap_pmic))
+ return dev_err_probe(&client->dev, PTR_ERR(regmap_pmic),
+ "regmap init failed\n");
+
+ return sec_pmic_probe(&client->dev, pdata->device_type, client->irq,
+ regmap_pmic, client);
+}
+
+static void sec_pmic_i2c_shutdown(struct i2c_client *i2c)
+{
+ sec_pmic_shutdown(&i2c->dev);
+}
+
+static const struct sec_pmic_i2c_platform_data s2dos05_data = {
+ .regmap_cfg = &s2dos05_regmap_config,
+ .device_type = S2DOS05
+};
+
+static const struct sec_pmic_i2c_platform_data s2mpa01_data = {
+ .regmap_cfg = &s2mpa01_regmap_config,
+ .device_type = S2MPA01,
+};
+
+static const struct sec_pmic_i2c_platform_data s2mps11_data = {
+ .regmap_cfg = &s2mps11_regmap_config,
+ .device_type = S2MPS11X,
+};
+
+static const struct sec_pmic_i2c_platform_data s2mps13_data = {
+ .regmap_cfg = &s2mps13_regmap_config,
+ .device_type = S2MPS13X,
+};
+
+static const struct sec_pmic_i2c_platform_data s2mps14_data = {
+ .regmap_cfg = &s2mps14_regmap_config,
+ .device_type = S2MPS14X,
+};
+
+static const struct sec_pmic_i2c_platform_data s2mps15_data = {
+ .regmap_cfg = &s2mps15_regmap_config,
+ .device_type = S2MPS15X,
+};
+
+static const struct sec_pmic_i2c_platform_data s2mpu02_data = {
+ .regmap_cfg = &s2mpu02_regmap_config,
+ .device_type = S2MPU02,
+};
+
+static const struct sec_pmic_i2c_platform_data s2mpu05_data = {
+ .regmap_cfg = &s2mpu05_regmap_config,
+ .device_type = S2MPU05,
+};
+
+static const struct sec_pmic_i2c_platform_data s5m8767_data = {
+ .regmap_cfg = &s5m8767_regmap_config,
+ .device_type = S5M8767X,
+};
+
+static const struct of_device_id sec_pmic_i2c_of_match[] = {
+ { .compatible = "samsung,s2dos05", .data = &s2dos05_data, },
+ { .compatible = "samsung,s2mpa01-pmic", .data = &s2mpa01_data, },
+ { .compatible = "samsung,s2mps11-pmic", .data = &s2mps11_data, },
+ { .compatible = "samsung,s2mps13-pmic", .data = &s2mps13_data, },
+ { .compatible = "samsung,s2mps14-pmic", .data = &s2mps14_data, },
+ { .compatible = "samsung,s2mps15-pmic", .data = &s2mps15_data, },
+ { .compatible = "samsung,s2mpu02-pmic", .data = &s2mpu02_data, },
+ { .compatible = "samsung,s2mpu05-pmic", .data = &s2mpu05_data, },
+ { .compatible = "samsung,s5m8767-pmic", .data = &s5m8767_data, },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sec_pmic_i2c_of_match);
+
+static struct i2c_driver sec_pmic_i2c_driver = {
+ .driver = {
+ .name = "sec-pmic-i2c",
+ .pm = pm_sleep_ptr(&sec_pmic_pm_ops),
+ .of_match_table = sec_pmic_i2c_of_match,
+ },
+ .probe = sec_pmic_i2c_probe,
+ .shutdown = sec_pmic_i2c_shutdown,
+};
+module_i2c_driver(sec_pmic_i2c_driver);
+
+MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
+MODULE_AUTHOR("André Draszik <andre.draszik@linaro.org>");
+MODULE_DESCRIPTION("I2C driver for the Samsung S5M");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/sec-irq.c b/drivers/mfd/sec-irq.c
index 047fc065fcf1..c5c80b1ba104 100644
--- a/drivers/mfd/sec-irq.c
+++ b/drivers/mfd/sec-irq.c
@@ -3,227 +3,139 @@
// Copyright (c) 2011-2014 Samsung Electronics Co., Ltd
// http://www.samsung.com
-#include <linux/device.h>
+#include <linux/array_size.h>
+#include <linux/build_bug.h>
+#include <linux/dev_printk.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/module.h>
-#include <linux/regmap.h>
-
#include <linux/mfd/samsung/core.h>
#include <linux/mfd/samsung/irq.h>
+#include <linux/mfd/samsung/s2mpg10.h>
#include <linux/mfd/samsung/s2mps11.h>
#include <linux/mfd/samsung/s2mps14.h>
#include <linux/mfd/samsung/s2mpu02.h>
#include <linux/mfd/samsung/s2mpu05.h>
#include <linux/mfd/samsung/s5m8767.h>
+#include <linux/regmap.h>
+#include "sec-core.h"
+
+static const struct regmap_irq s2mpg10_irqs[] = {
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PWRONF, 0, S2MPG10_IRQ_PWRONF_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PWRONR, 0, S2MPG10_IRQ_PWRONR_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_JIGONBF, 0, S2MPG10_IRQ_JIGONBF_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_JIGONBR, 0, S2MPG10_IRQ_JIGONBR_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_ACOKBF, 0, S2MPG10_IRQ_ACOKBF_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_ACOKBR, 0, S2MPG10_IRQ_ACOKBR_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PWRON1S, 0, S2MPG10_IRQ_PWRON1S_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_MRB, 0, S2MPG10_IRQ_MRB_MASK),
+
+ REGMAP_IRQ_REG(S2MPG10_IRQ_RTC60S, 1, S2MPG10_IRQ_RTC60S_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_RTCA1, 1, S2MPG10_IRQ_RTCA1_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_RTCA0, 1, S2MPG10_IRQ_RTCA0_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_RTC1S, 1, S2MPG10_IRQ_RTC1S_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_WTSR_COLDRST, 1, S2MPG10_IRQ_WTSR_COLDRST_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_WTSR, 1, S2MPG10_IRQ_WTSR_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_WRST, 1, S2MPG10_IRQ_WRST_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_SMPL, 1, S2MPG10_IRQ_SMPL_MASK),
+
+ REGMAP_IRQ_REG(S2MPG10_IRQ_120C, 2, S2MPG10_IRQ_INT120C_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_140C, 2, S2MPG10_IRQ_INT140C_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_TSD, 2, S2MPG10_IRQ_TSD_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PIF_TIMEOUT1, 2, S2MPG10_IRQ_PIF_TIMEOUT1_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PIF_TIMEOUT2, 2, S2MPG10_IRQ_PIF_TIMEOUT2_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_SPD_PARITY_ERR, 2, S2MPG10_IRQ_SPD_PARITY_ERR_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_SPD_ABNORMAL_STOP, 2, S2MPG10_IRQ_SPD_ABNORMAL_STOP_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PMETER_OVERF, 2, S2MPG10_IRQ_PMETER_OVERF_MASK),
+
+ REGMAP_IRQ_REG(S2MPG10_IRQ_OCP_B1M, 3, S2MPG10_IRQ_OCP_B1M_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_OCP_B2M, 3, S2MPG10_IRQ_OCP_B2M_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_OCP_B3M, 3, S2MPG10_IRQ_OCP_B3M_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_OCP_B4M, 3, S2MPG10_IRQ_OCP_B4M_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_OCP_B5M, 3, S2MPG10_IRQ_OCP_B5M_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_OCP_B6M, 3, S2MPG10_IRQ_OCP_B6M_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_OCP_B7M, 3, S2MPG10_IRQ_OCP_B7M_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_OCP_B8M, 3, S2MPG10_IRQ_OCP_B8M_MASK),
+
+ REGMAP_IRQ_REG(S2MPG10_IRQ_OCP_B9M, 4, S2MPG10_IRQ_OCP_B9M_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_OCP_B10M, 4, S2MPG10_IRQ_OCP_B10M_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_WLWP_ACC, 4, S2MPG10_IRQ_WLWP_ACC_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_SMPL_TIMEOUT, 4, S2MPG10_IRQ_SMPL_TIMEOUT_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_WTSR_TIMEOUT, 4, S2MPG10_IRQ_WTSR_TIMEOUT_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_SPD_SRP_PKT_RST, 4, S2MPG10_IRQ_SPD_SRP_PKT_RST_MASK),
+
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PWR_WARN_CH0, 5, S2MPG10_IRQ_PWR_WARN_CH0_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PWR_WARN_CH1, 5, S2MPG10_IRQ_PWR_WARN_CH1_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PWR_WARN_CH2, 5, S2MPG10_IRQ_PWR_WARN_CH2_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PWR_WARN_CH3, 5, S2MPG10_IRQ_PWR_WARN_CH3_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PWR_WARN_CH4, 5, S2MPG10_IRQ_PWR_WARN_CH4_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PWR_WARN_CH5, 5, S2MPG10_IRQ_PWR_WARN_CH5_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PWR_WARN_CH6, 5, S2MPG10_IRQ_PWR_WARN_CH6_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PWR_WARN_CH7, 5, S2MPG10_IRQ_PWR_WARN_CH7_MASK),
+};
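This table (and the conversions below) use the REGMAP_IRQ_REG() shorthand from <linux/regmap.h>, which expands to the same designated initialiser the old arrays spelled out by hand:

#define REGMAP_IRQ_REG(_irq, _off, _mask) \
	[_irq] = { .reg_offset = (_off), .mask = (_mask) }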
static const struct regmap_irq s2mps11_irqs[] = {
- [S2MPS11_IRQ_PWRONF] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_PWRONF_MASK,
- },
- [S2MPS11_IRQ_PWRONR] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_PWRONR_MASK,
- },
- [S2MPS11_IRQ_JIGONBF] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_JIGONBF_MASK,
- },
- [S2MPS11_IRQ_JIGONBR] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_JIGONBR_MASK,
- },
- [S2MPS11_IRQ_ACOKBF] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_ACOKBF_MASK,
- },
- [S2MPS11_IRQ_ACOKBR] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_ACOKBR_MASK,
- },
- [S2MPS11_IRQ_PWRON1S] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_PWRON1S_MASK,
- },
- [S2MPS11_IRQ_MRB] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_MRB_MASK,
- },
- [S2MPS11_IRQ_RTC60S] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTC60S_MASK,
- },
- [S2MPS11_IRQ_RTCA1] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTCA1_MASK,
- },
- [S2MPS11_IRQ_RTCA0] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTCA0_MASK,
- },
- [S2MPS11_IRQ_SMPL] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_SMPL_MASK,
- },
- [S2MPS11_IRQ_RTC1S] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTC1S_MASK,
- },
- [S2MPS11_IRQ_WTSR] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_WTSR_MASK,
- },
- [S2MPS11_IRQ_INT120C] = {
- .reg_offset = 2,
- .mask = S2MPS11_IRQ_INT120C_MASK,
- },
- [S2MPS11_IRQ_INT140C] = {
- .reg_offset = 2,
- .mask = S2MPS11_IRQ_INT140C_MASK,
- },
+ REGMAP_IRQ_REG(S2MPS11_IRQ_PWRONF, 0, S2MPS11_IRQ_PWRONF_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_PWRONR, 0, S2MPS11_IRQ_PWRONR_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_JIGONBF, 0, S2MPS11_IRQ_JIGONBF_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_JIGONBR, 0, S2MPS11_IRQ_JIGONBR_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_ACOKBF, 0, S2MPS11_IRQ_ACOKBF_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_ACOKBR, 0, S2MPS11_IRQ_ACOKBR_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_PWRON1S, 0, S2MPS11_IRQ_PWRON1S_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_MRB, 0, S2MPS11_IRQ_MRB_MASK),
+
+ REGMAP_IRQ_REG(S2MPS11_IRQ_RTC60S, 1, S2MPS11_IRQ_RTC60S_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_RTCA1, 1, S2MPS11_IRQ_RTCA1_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_RTCA0, 1, S2MPS11_IRQ_RTCA0_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_SMPL, 1, S2MPS11_IRQ_SMPL_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_RTC1S, 1, S2MPS11_IRQ_RTC1S_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_WTSR, 1, S2MPS11_IRQ_WTSR_MASK),
+
+ REGMAP_IRQ_REG(S2MPS11_IRQ_INT120C, 2, S2MPS11_IRQ_INT120C_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_INT140C, 2, S2MPS11_IRQ_INT140C_MASK),
};
static const struct regmap_irq s2mps14_irqs[] = {
- [S2MPS14_IRQ_PWRONF] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_PWRONF_MASK,
- },
- [S2MPS14_IRQ_PWRONR] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_PWRONR_MASK,
- },
- [S2MPS14_IRQ_JIGONBF] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_JIGONBF_MASK,
- },
- [S2MPS14_IRQ_JIGONBR] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_JIGONBR_MASK,
- },
- [S2MPS14_IRQ_ACOKBF] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_ACOKBF_MASK,
- },
- [S2MPS14_IRQ_ACOKBR] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_ACOKBR_MASK,
- },
- [S2MPS14_IRQ_PWRON1S] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_PWRON1S_MASK,
- },
- [S2MPS14_IRQ_MRB] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_MRB_MASK,
- },
- [S2MPS14_IRQ_RTC60S] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTC60S_MASK,
- },
- [S2MPS14_IRQ_RTCA1] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTCA1_MASK,
- },
- [S2MPS14_IRQ_RTCA0] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTCA0_MASK,
- },
- [S2MPS14_IRQ_SMPL] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_SMPL_MASK,
- },
- [S2MPS14_IRQ_RTC1S] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTC1S_MASK,
- },
- [S2MPS14_IRQ_WTSR] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_WTSR_MASK,
- },
- [S2MPS14_IRQ_INT120C] = {
- .reg_offset = 2,
- .mask = S2MPS11_IRQ_INT120C_MASK,
- },
- [S2MPS14_IRQ_INT140C] = {
- .reg_offset = 2,
- .mask = S2MPS11_IRQ_INT140C_MASK,
- },
- [S2MPS14_IRQ_TSD] = {
- .reg_offset = 2,
- .mask = S2MPS14_IRQ_TSD_MASK,
- },
+ REGMAP_IRQ_REG(S2MPS14_IRQ_PWRONF, 0, S2MPS11_IRQ_PWRONF_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_PWRONR, 0, S2MPS11_IRQ_PWRONR_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_JIGONBF, 0, S2MPS11_IRQ_JIGONBF_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_JIGONBR, 0, S2MPS11_IRQ_JIGONBR_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_ACOKBF, 0, S2MPS11_IRQ_ACOKBF_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_ACOKBR, 0, S2MPS11_IRQ_ACOKBR_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_PWRON1S, 0, S2MPS11_IRQ_PWRON1S_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_MRB, 0, S2MPS11_IRQ_MRB_MASK),
+
+ REGMAP_IRQ_REG(S2MPS14_IRQ_RTC60S, 1, S2MPS11_IRQ_RTC60S_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_RTCA1, 1, S2MPS11_IRQ_RTCA1_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_RTCA0, 1, S2MPS11_IRQ_RTCA0_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_SMPL, 1, S2MPS11_IRQ_SMPL_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_RTC1S, 1, S2MPS11_IRQ_RTC1S_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_WTSR, 1, S2MPS11_IRQ_WTSR_MASK),
+
+ REGMAP_IRQ_REG(S2MPS14_IRQ_INT120C, 2, S2MPS11_IRQ_INT120C_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_INT140C, 2, S2MPS11_IRQ_INT140C_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_TSD, 2, S2MPS14_IRQ_TSD_MASK),
};
static const struct regmap_irq s2mpu02_irqs[] = {
- [S2MPU02_IRQ_PWRONF] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_PWRONF_MASK,
- },
- [S2MPU02_IRQ_PWRONR] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_PWRONR_MASK,
- },
- [S2MPU02_IRQ_JIGONBF] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_JIGONBF_MASK,
- },
- [S2MPU02_IRQ_JIGONBR] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_JIGONBR_MASK,
- },
- [S2MPU02_IRQ_ACOKBF] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_ACOKBF_MASK,
- },
- [S2MPU02_IRQ_ACOKBR] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_ACOKBR_MASK,
- },
- [S2MPU02_IRQ_PWRON1S] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_PWRON1S_MASK,
- },
- [S2MPU02_IRQ_MRB] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_MRB_MASK,
- },
- [S2MPU02_IRQ_RTC60S] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTC60S_MASK,
- },
- [S2MPU02_IRQ_RTCA1] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTCA1_MASK,
- },
- [S2MPU02_IRQ_RTCA0] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTCA0_MASK,
- },
- [S2MPU02_IRQ_SMPL] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_SMPL_MASK,
- },
- [S2MPU02_IRQ_RTC1S] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTC1S_MASK,
- },
- [S2MPU02_IRQ_WTSR] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_WTSR_MASK,
- },
- [S2MPU02_IRQ_INT120C] = {
- .reg_offset = 2,
- .mask = S2MPS11_IRQ_INT120C_MASK,
- },
- [S2MPU02_IRQ_INT140C] = {
- .reg_offset = 2,
- .mask = S2MPS11_IRQ_INT140C_MASK,
- },
- [S2MPU02_IRQ_TSD] = {
- .reg_offset = 2,
- .mask = S2MPS14_IRQ_TSD_MASK,
- },
+ REGMAP_IRQ_REG(S2MPU02_IRQ_PWRONF, 0, S2MPS11_IRQ_PWRONF_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_PWRONR, 0, S2MPS11_IRQ_PWRONR_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_JIGONBF, 0, S2MPS11_IRQ_JIGONBF_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_JIGONBR, 0, S2MPS11_IRQ_JIGONBR_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_ACOKBF, 0, S2MPS11_IRQ_ACOKBF_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_ACOKBR, 0, S2MPS11_IRQ_ACOKBR_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_PWRON1S, 0, S2MPS11_IRQ_PWRON1S_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_MRB, 0, S2MPS11_IRQ_MRB_MASK),
+
+ REGMAP_IRQ_REG(S2MPU02_IRQ_RTC60S, 1, S2MPS11_IRQ_RTC60S_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_RTCA1, 1, S2MPS11_IRQ_RTCA1_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_RTCA0, 1, S2MPS11_IRQ_RTCA0_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_SMPL, 1, S2MPS11_IRQ_SMPL_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_RTC1S, 1, S2MPS11_IRQ_RTC1S_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_WTSR, 1, S2MPS11_IRQ_WTSR_MASK),
+
+ REGMAP_IRQ_REG(S2MPU02_IRQ_INT120C, 2, S2MPS11_IRQ_INT120C_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_INT140C, 2, S2MPS11_IRQ_INT140C_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_TSD, 2, S2MPS14_IRQ_TSD_MASK),
};
static const struct regmap_irq s2mpu05_irqs[] = {
@@ -247,74 +159,35 @@ static const struct regmap_irq s2mpu05_irqs[] = {
};
static const struct regmap_irq s5m8767_irqs[] = {
- [S5M8767_IRQ_PWRR] = {
- .reg_offset = 0,
- .mask = S5M8767_IRQ_PWRR_MASK,
- },
- [S5M8767_IRQ_PWRF] = {
- .reg_offset = 0,
- .mask = S5M8767_IRQ_PWRF_MASK,
- },
- [S5M8767_IRQ_PWR1S] = {
- .reg_offset = 0,
- .mask = S5M8767_IRQ_PWR1S_MASK,
- },
- [S5M8767_IRQ_JIGR] = {
- .reg_offset = 0,
- .mask = S5M8767_IRQ_JIGR_MASK,
- },
- [S5M8767_IRQ_JIGF] = {
- .reg_offset = 0,
- .mask = S5M8767_IRQ_JIGF_MASK,
- },
- [S5M8767_IRQ_LOWBAT2] = {
- .reg_offset = 0,
- .mask = S5M8767_IRQ_LOWBAT2_MASK,
- },
- [S5M8767_IRQ_LOWBAT1] = {
- .reg_offset = 0,
- .mask = S5M8767_IRQ_LOWBAT1_MASK,
- },
- [S5M8767_IRQ_MRB] = {
- .reg_offset = 1,
- .mask = S5M8767_IRQ_MRB_MASK,
- },
- [S5M8767_IRQ_DVSOK2] = {
- .reg_offset = 1,
- .mask = S5M8767_IRQ_DVSOK2_MASK,
- },
- [S5M8767_IRQ_DVSOK3] = {
- .reg_offset = 1,
- .mask = S5M8767_IRQ_DVSOK3_MASK,
- },
- [S5M8767_IRQ_DVSOK4] = {
- .reg_offset = 1,
- .mask = S5M8767_IRQ_DVSOK4_MASK,
- },
- [S5M8767_IRQ_RTC60S] = {
- .reg_offset = 2,
- .mask = S5M8767_IRQ_RTC60S_MASK,
- },
- [S5M8767_IRQ_RTCA1] = {
- .reg_offset = 2,
- .mask = S5M8767_IRQ_RTCA1_MASK,
- },
- [S5M8767_IRQ_RTCA2] = {
- .reg_offset = 2,
- .mask = S5M8767_IRQ_RTCA2_MASK,
- },
- [S5M8767_IRQ_SMPL] = {
- .reg_offset = 2,
- .mask = S5M8767_IRQ_SMPL_MASK,
- },
- [S5M8767_IRQ_RTC1S] = {
- .reg_offset = 2,
- .mask = S5M8767_IRQ_RTC1S_MASK,
- },
- [S5M8767_IRQ_WTSR] = {
- .reg_offset = 2,
- .mask = S5M8767_IRQ_WTSR_MASK,
- },
+ REGMAP_IRQ_REG(S5M8767_IRQ_PWRR, 0, S5M8767_IRQ_PWRR_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_PWRF, 0, S5M8767_IRQ_PWRF_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_PWR1S, 0, S5M8767_IRQ_PWR1S_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_JIGR, 0, S5M8767_IRQ_JIGR_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_JIGF, 0, S5M8767_IRQ_JIGF_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_LOWBAT2, 0, S5M8767_IRQ_LOWBAT2_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_LOWBAT1, 0, S5M8767_IRQ_LOWBAT1_MASK),
+
+ REGMAP_IRQ_REG(S5M8767_IRQ_MRB, 1, S5M8767_IRQ_MRB_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_DVSOK2, 1, S5M8767_IRQ_DVSOK2_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_DVSOK3, 1, S5M8767_IRQ_DVSOK3_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_DVSOK4, 1, S5M8767_IRQ_DVSOK4_MASK),
+
+ REGMAP_IRQ_REG(S5M8767_IRQ_RTC60S, 2, S5M8767_IRQ_RTC60S_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_RTCA1, 2, S5M8767_IRQ_RTCA1_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_RTCA2, 2, S5M8767_IRQ_RTCA2_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_SMPL, 2, S5M8767_IRQ_SMPL_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_RTC1S, 2, S5M8767_IRQ_RTC1S_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_WTSR, 2, S5M8767_IRQ_WTSR_MASK),
+};
+
+/* All S2MPG10 interrupt sources are read-only and don't require clearing */
+static const struct regmap_irq_chip s2mpg10_irq_chip = {
+ .name = "s2mpg10",
+ .irqs = s2mpg10_irqs,
+ .num_irqs = ARRAY_SIZE(s2mpg10_irqs),
+ .num_regs = 6,
+ .status_base = S2MPG10_PMIC_INT1,
+ .mask_base = S2MPG10_PMIC_INT1M,
};
static const struct regmap_irq_chip s2mps11_irq_chip = {
@@ -382,23 +255,21 @@ static const struct regmap_irq_chip s5m8767_irq_chip = {
int sec_irq_init(struct sec_pmic_dev *sec_pmic)
{
- int ret = 0;
- int type = sec_pmic->device_type;
const struct regmap_irq_chip *sec_irq_chip;
+ int ret;
- if (!sec_pmic->irq) {
- dev_warn(sec_pmic->dev,
- "No interrupt specified, no interrupts\n");
- return 0;
- }
-
- switch (type) {
+ switch (sec_pmic->device_type) {
case S5M8767X:
sec_irq_chip = &s5m8767_irq_chip;
break;
+ case S2DOS05:
+ return 0;
case S2MPA01:
sec_irq_chip = &s2mps14_irq_chip;
break;
+ case S2MPG10:
+ sec_irq_chip = &s2mpg10_irq_chip;
+ break;
case S2MPS11X:
sec_irq_chip = &s2mps11_irq_chip;
break;
@@ -418,18 +289,24 @@ int sec_irq_init(struct sec_pmic_dev *sec_pmic)
sec_irq_chip = &s2mpu05_irq_chip;
break;
default:
- dev_err(sec_pmic->dev, "Unknown device type %lu\n",
- sec_pmic->device_type);
- return -EINVAL;
+ return dev_err_probe(sec_pmic->dev, -EINVAL,
+ "Unsupported device type %d\n",
+ sec_pmic->device_type);
+ }
+
+ if (!sec_pmic->irq) {
+ dev_warn(sec_pmic->dev,
+ "No interrupt specified, no interrupts\n");
+ return 0;
}
ret = devm_regmap_add_irq_chip(sec_pmic->dev, sec_pmic->regmap_pmic,
sec_pmic->irq, IRQF_ONESHOT,
0, sec_irq_chip, &sec_pmic->irq_data);
- if (ret != 0) {
- dev_err(sec_pmic->dev, "Failed to register IRQ chip: %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(sec_pmic->dev, ret,
+ "Failed to add %s IRQ chip\n",
+ sec_irq_chip->name);
/*
* The rtc-s5m driver requests S2MPS14_IRQ_RTCA0 also for S2MPS11
@@ -439,10 +316,3 @@ int sec_irq_init(struct sec_pmic_dev *sec_pmic)
return 0;
}
-EXPORT_SYMBOL_GPL(sec_irq_init);
-
-MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
-MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
-MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
-MODULE_DESCRIPTION("Interrupt support for the S5M MFD");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index 7ee293b09f62..a5f9241fa3f2 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -631,49 +631,6 @@ unsigned long sm501_set_clock(struct device *dev,
EXPORT_SYMBOL_GPL(sm501_set_clock);
-/* sm501_find_clock
- *
- * finds the closest available frequency for a given clock
-*/
-
-unsigned long sm501_find_clock(struct device *dev,
- int clksrc,
- unsigned long req_freq)
-{
- struct sm501_devdata *sm = dev_get_drvdata(dev);
- unsigned long sm501_freq; /* the frequency achieveable by the 501 */
- struct sm501_clock to;
-
- switch (clksrc) {
- case SM501_CLOCK_P2XCLK:
- if (sm->rev >= 0xC0) {
- /* SM502 -> use the programmable PLL */
- sm501_freq = (sm501_calc_pll(2 * req_freq,
- &to, 5) / 2);
- } else {
- sm501_freq = (sm501_select_clock(2 * req_freq,
- &to, 5) / 2);
- }
- break;
-
- case SM501_CLOCK_V2XCLK:
- sm501_freq = (sm501_select_clock(2 * req_freq, &to, 3) / 2);
- break;
-
- case SM501_CLOCK_MCLK:
- case SM501_CLOCK_M1XCLK:
- sm501_freq = sm501_select_clock(req_freq, &to, 3);
- break;
-
- default:
- sm501_freq = 0; /* error */
- }
-
- return sm501_freq;
-}
-
-EXPORT_SYMBOL_GPL(sm501_find_clock);
-
static struct sm501_device *to_sm_device(struct platform_device *pdev)
{
return container_of(pdev, struct sm501_device, pdev);
@@ -915,7 +872,8 @@ static void sm501_gpio_ensure_gpio(struct sm501_gpio_chip *smchip,
}
}
-static void sm501_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int sm501_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct sm501_gpio_chip *smchip = gpiochip_get_data(chip);
@@ -939,6 +897,8 @@ static void sm501_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
sm501_gpio_ensure_gpio(smchip, bit);
spin_unlock_irqrestore(&smgpio->lock, save);
+
+ return 0;
}
static int sm501_gpio_input(struct gpio_chip *chip, unsigned offset)
@@ -1005,7 +965,7 @@ static const struct gpio_chip gpio_chip_template = {
.ngpio = 32,
.direction_input = sm501_gpio_input,
.direction_output = sm501_gpio_output,
- .set = sm501_gpio_set,
+ .set_rv = sm501_gpio_set,
.get = sm501_gpio_get,
};
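
The sm501 hunk above (like the tps65010 and ucb1x00 hunks further down) is part
of the GPIO subsystem's move from the void .set callback to the int-returning
.set_rv, which lets drivers report bus errors back to the core. A minimal sketch
of the new shape, with a hypothetical my_regs_write() accessor standing in for
the real register write:

	#include <linux/gpio/driver.h>

	/* hypothetical register accessor standing in for real MMIO */
	static int my_regs_write(void *priv, unsigned int offset, int value)
	{
		return 0;
	}

	static int my_gpio_set(struct gpio_chip *chip, unsigned int offset,
			       int value)
	{
		/* write the line; return a negative errno on failure */
		return my_regs_write(gpiochip_get_data(chip), offset, value);
	}

	static const struct gpio_chip my_chip_template = {
		.label	= "example",
		.ngpio	= 8,
		.set_rv	= my_gpio_set,	/* replaces the legacy .set */
	};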
diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c
index 7186e2108108..d6b4350779e6 100644
--- a/drivers/mfd/sprd-sc27xx-spi.c
+++ b/drivers/mfd/sprd-sc27xx-spi.c
@@ -210,7 +210,10 @@ static int sprd_pmic_probe(struct spi_device *spi)
return ret;
}
- device_init_wakeup(&spi->dev, true);
+ ret = devm_device_init_wakeup(&spi->dev);
+ if (ret)
+ return dev_err_probe(&spi->dev, ret, "Failed to init wakeup\n");
+
return 0;
}
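
The sprd-sc27xx change swaps device_init_wakeup() for its device-managed
variant, so the wakeup source is torn down automatically on unbind. A sketch of
the resulting probe pattern under that assumption (example_probe() is
hypothetical):

	#include <linux/pm_wakeup.h>
	#include <linux/spi/spi.h>

	static int example_probe(struct spi_device *spi)
	{
		int ret;

		/* devres undoes this on driver detach; no manual
		 * device_init_wakeup(dev, false) needed in remove()
		 */
		ret = devm_device_init_wakeup(&spi->dev);
		if (ret)
			return dev_err_probe(&spi->dev, ret,
					     "Failed to init wakeup\n");

		return 0;
	}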
diff --git a/drivers/mfd/stm32-lptimer.c b/drivers/mfd/stm32-lptimer.c
index b2704a9809c7..09073dbc9c80 100644
--- a/drivers/mfd/stm32-lptimer.c
+++ b/drivers/mfd/stm32-lptimer.c
@@ -6,6 +6,7 @@
* Inspired by Benjamin Gaignard's stm32-timers driver
*/
+#include <linux/bitfield.h>
#include <linux/mfd/stm32-lptimer.h>
#include <linux/module.h>
#include <linux/of_platform.h>
@@ -49,6 +50,36 @@ static int stm32_lptimer_detect_encoder(struct stm32_lptimer *ddata)
return 0;
}
+static int stm32_lptimer_detect_hwcfgr(struct stm32_lptimer *ddata)
+{
+ u32 val;
+ int ret;
+
+ ret = regmap_read(ddata->regmap, STM32_LPTIM_VERR, &ddata->version);
+ if (ret)
+ return ret;
+
+ /* Try to guess parameters from HWCFGR: e.g. encoder mode (STM32MP15) */
+ ret = regmap_read(ddata->regmap, STM32_LPTIM_HWCFGR1, &val);
+ if (ret)
+ return ret;
+
+ /* Fallback to legacy init if HWCFGR isn't present */
+ if (!val)
+ return stm32_lptimer_detect_encoder(ddata);
+
+ ddata->has_encoder = FIELD_GET(STM32_LPTIM_HWCFGR1_ENCODER, val);
+
+ ret = regmap_read(ddata->regmap, STM32_LPTIM_HWCFGR2, &val);
+ if (ret)
+ return ret;
+
+ /* Number of capture/compare channels */
+ ddata->num_cc_chans = FIELD_GET(STM32_LPTIM_HWCFGR2_CHAN_NUM, val);
+
+ return 0;
+}
+
static int stm32_lptimer_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -73,7 +104,7 @@ static int stm32_lptimer_probe(struct platform_device *pdev)
if (IS_ERR(ddata->clk))
return PTR_ERR(ddata->clk);
- ret = stm32_lptimer_detect_encoder(ddata);
+ ret = stm32_lptimer_detect_hwcfgr(ddata);
if (ret)
return ret;
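
stm32_lptimer_detect_hwcfgr() relies on FIELD_GET() from <linux/bitfield.h>,
which is why the hunk adds that include: given a GENMASK()-style field mask,
FIELD_GET() masks and shifts in one step. A small self-contained sketch with a
hypothetical field layout:

	#include <linux/bitfield.h>
	#include <linux/bits.h>

	/* hypothetical: channel count lives in bits 7..4 of HWCFGR2 */
	#define EXAMPLE_CHAN_NUM	GENMASK(7, 4)

	static unsigned int example_num_channels(u32 hwcfgr2)
	{
		/* e.g. 0x30 -> 3: masks with GENMASK(7, 4), shifts by 4 */
		return FIELD_GET(EXAMPLE_CHAN_NUM, hwcfgr2);
	}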
diff --git a/drivers/mfd/stmpe-spi.c b/drivers/mfd/stmpe-spi.c
index 792236f56399..b9cc85ea2c40 100644
--- a/drivers/mfd/stmpe-spi.c
+++ b/drivers/mfd/stmpe-spi.c
@@ -129,7 +129,7 @@ static const struct spi_device_id stmpe_spi_id[] = {
{ "stmpe2403", STMPE2403 },
{ }
};
-MODULE_DEVICE_TABLE(spi, stmpe_id);
+MODULE_DEVICE_TABLE(spi, stmpe_spi_id);
static struct spi_driver stmpe_spi_driver = {
.driver = {
diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c
index 00fb12c4f491..03bd5cd66798 100644
--- a/drivers/mfd/tps65010.c
+++ b/drivers/mfd/tps65010.c
@@ -446,7 +446,7 @@ static irqreturn_t tps65010_irq(int irq, void *_tps)
* offsets 4..5 == LED1/nPG, LED2 (we set one of the non-BLINK modes)
* offset 6 == vibrator motor driver
*/
-static void
+static int
tps65010_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
if (offset < 4)
@@ -455,6 +455,8 @@ tps65010_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
tps65010_set_led(offset - 3, value ? ON : OFF);
else
tps65010_set_vib(value);
+
+ return 0;
}
static int
@@ -512,7 +514,6 @@ static void tps65010_remove(struct i2c_client *client)
if (client->irq > 0)
free_irq(client->irq, tps);
cancel_delayed_work_sync(&tps->work);
- debugfs_remove(tps->file);
the_tps = NULL;
}
@@ -608,7 +609,7 @@ static int tps65010_probe(struct i2c_client *client)
tps65010_work(&tps->work.work);
- tps->file = debugfs_create_file(DRIVER_NAME, S_IRUGO, NULL,
+ tps->file = debugfs_create_file(DRIVER_NAME, S_IRUGO, client->debugfs,
tps, DEBUG_FOPS);
/* optionally register GPIOs */
@@ -619,7 +620,7 @@ static int tps65010_probe(struct i2c_client *client)
tps->chip.parent = &client->dev;
tps->chip.owner = THIS_MODULE;
- tps->chip.set = tps65010_gpio_set;
+ tps->chip.set_rv = tps65010_gpio_set;
tps->chip.direction_output = tps65010_output;
/* NOTE: only partial support for inputs; nyet IRQs */
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index fc4d4c844a81..fd71ba29f6b5 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -104,7 +104,8 @@ unsigned int ucb1x00_io_read(struct ucb1x00 *ucb)
return ucb1x00_reg_read(ucb, UCB_IO_DATA);
}
-static void ucb1x00_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int ucb1x00_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct ucb1x00 *ucb = gpiochip_get_data(chip);
unsigned long flags;
@@ -119,6 +120,8 @@ static void ucb1x00_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
ucb1x00_disable(ucb);
spin_unlock_irqrestore(&ucb->io_lock, flags);
+
+ return 0;
}
static int ucb1x00_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -567,7 +570,7 @@ static int ucb1x00_probe(struct mcp *mcp)
ucb->gpio.owner = THIS_MODULE;
ucb->gpio.base = pdata->gpio_base;
ucb->gpio.ngpio = 10;
- ucb->gpio.set = ucb1x00_gpio_set;
+ ucb->gpio.set_rv = ucb1x00_gpio_set;
ucb->gpio.get = ucb1x00_gpio_get;
ucb->gpio.direction_input = ucb1x00_gpio_direction_input;
ucb->gpio.direction_output = ucb1x00_gpio_direction_output;
diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c
index bdc2e6fda782..42e7d2a2a90c 100644
--- a/drivers/misc/uacce/uacce.c
+++ b/drivers/misc/uacce/uacce.c
@@ -465,40 +465,6 @@ static void uacce_release(struct device *dev)
kfree(uacce);
}
-static unsigned int uacce_enable_sva(struct device *parent, unsigned int flags)
-{
- int ret;
-
- if (!(flags & UACCE_DEV_SVA))
- return flags;
-
- flags &= ~UACCE_DEV_SVA;
-
- ret = iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_IOPF);
- if (ret) {
- dev_err(parent, "failed to enable IOPF feature! ret = %pe\n", ERR_PTR(ret));
- return flags;
- }
-
- ret = iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_SVA);
- if (ret) {
- dev_err(parent, "failed to enable SVA feature! ret = %pe\n", ERR_PTR(ret));
- iommu_dev_disable_feature(parent, IOMMU_DEV_FEAT_IOPF);
- return flags;
- }
-
- return flags | UACCE_DEV_SVA;
-}
-
-static void uacce_disable_sva(struct uacce_device *uacce)
-{
- if (!(uacce->flags & UACCE_DEV_SVA))
- return;
-
- iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_SVA);
- iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_IOPF);
-}
-
/**
* uacce_alloc() - alloc an accelerator
* @parent: pointer of uacce parent device
@@ -518,8 +484,6 @@ struct uacce_device *uacce_alloc(struct device *parent,
if (!uacce)
return ERR_PTR(-ENOMEM);
- flags = uacce_enable_sva(parent, flags);
-
uacce->parent = parent;
uacce->flags = flags;
uacce->ops = interface->ops;
@@ -542,7 +506,6 @@ struct uacce_device *uacce_alloc(struct device *parent,
return uacce;
err_with_uacce:
- uacce_disable_sva(uacce);
kfree(uacce);
return ERR_PTR(ret);
}
@@ -605,9 +568,6 @@ void uacce_remove(struct uacce_device *uacce)
unmap_mapping_range(q->mapping, 0, 0, 1);
}
- /* disable sva now since no opened queues */
- uacce_disable_sva(uacce);
-
if (uacce->cdev)
cdev_device_del(uacce->cdev, &uacce->dev);
xa_erase(&uacce_xa, uacce->dev_id);
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index ff2f9e55ef28..aed653ce8fa2 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -98,7 +98,7 @@ config MTD_MCHP48L640
config MTD_SPEAR_SMI
tristate "SPEAR MTD NOR Support through SMI controller"
depends on PLAT_SPEAR || COMPILE_TEST
- default y
+ default PLAT_SPEAR
help
This enable SNOR support on SPEAR platforms using SMI controller
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 8dc4f5c493fc..391d81ad960c 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -559,7 +559,7 @@ static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
/* Sanitize user input */
p.devname[BLKPG_DEVNAMELTH - 1] = '\0';
- return mtd_add_partition(mtd, p.devname, p.start, p.length);
+ return mtd_add_partition(mtd, p.devname, p.start, p.length, NULL);
case BLKPG_DEL_PARTITION:
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 5ba9a741f5ac..429d8c16baf0 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -68,7 +68,13 @@ static struct class mtd_class = {
.pm = MTD_CLS_PM_OPS,
};
+static struct class mtd_master_class = {
+ .name = "mtd_master",
+ .pm = MTD_CLS_PM_OPS,
+};
+
static DEFINE_IDR(mtd_idr);
+static DEFINE_IDR(mtd_master_idr);
/* These are exported solely for the purpose of mtd_blkdevs.c. You
should not use them for _anything_ else */
@@ -83,8 +89,9 @@ EXPORT_SYMBOL_GPL(__mtd_next_device);
static LIST_HEAD(mtd_notifiers);
-
+#define MTD_MASTER_DEVS 255
#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
+static dev_t mtd_master_devt;
/* REVISIT once MTD uses the driver model better, whoever allocates
* the mtd_info will probably want to use the release() hook...
@@ -104,6 +111,17 @@ static void mtd_release(struct device *dev)
device_destroy(&mtd_class, index + 1);
}
+static void mtd_master_release(struct device *dev)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ idr_remove(&mtd_master_idr, mtd->index);
+ of_node_put(mtd_get_of_node(mtd));
+
+ if (mtd_is_partition(mtd))
+ release_mtd_partition(mtd);
+}
+
static void mtd_device_release(struct kref *kref)
{
struct mtd_info *mtd = container_of(kref, struct mtd_info, refcnt);
@@ -367,6 +385,11 @@ static const struct device_type mtd_devtype = {
.release = mtd_release,
};
+static const struct device_type mtd_master_devtype = {
+ .name = "mtd_master",
+ .release = mtd_master_release,
+};
+
static bool mtd_expert_analysis_mode;
#ifdef CONFIG_DEBUG_FS
@@ -634,13 +657,13 @@ exit_parent:
/**
* add_mtd_device - register an MTD device
* @mtd: pointer to new MTD device info structure
+ * @partitioned: create partitioned device
*
* Add a device to the list of MTD devices present in the system, and
* notify each currently active MTD 'user' of its arrival. Returns
* zero on success or non-zero on failure.
*/
-
-int add_mtd_device(struct mtd_info *mtd)
+int add_mtd_device(struct mtd_info *mtd, bool partitioned)
{
struct device_node *np = mtd_get_of_node(mtd);
struct mtd_info *master = mtd_get_master(mtd);
@@ -687,10 +710,17 @@ int add_mtd_device(struct mtd_info *mtd)
ofidx = -1;
if (np)
ofidx = of_alias_get_id(np, "mtd");
- if (ofidx >= 0)
- i = idr_alloc(&mtd_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL);
- else
- i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
+ if (partitioned) {
+ if (ofidx >= 0)
+ i = idr_alloc(&mtd_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL);
+ else
+ i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
+ } else {
+ if (ofidx >= 0)
+ i = idr_alloc(&mtd_master_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL);
+ else
+ i = idr_alloc(&mtd_master_idr, mtd, 0, 0, GFP_KERNEL);
+ }
if (i < 0) {
error = i;
goto fail_locked;
@@ -738,10 +768,18 @@ int add_mtd_device(struct mtd_info *mtd)
/* Caller should have set dev.parent to match the
* physical device, if appropriate.
*/
- mtd->dev.type = &mtd_devtype;
- mtd->dev.class = &mtd_class;
- mtd->dev.devt = MTD_DEVT(i);
- error = dev_set_name(&mtd->dev, "mtd%d", i);
+ if (partitioned) {
+ mtd->dev.type = &mtd_devtype;
+ mtd->dev.class = &mtd_class;
+ mtd->dev.devt = MTD_DEVT(i);
+ error = dev_set_name(&mtd->dev, "mtd%d", i);
+ } else {
+ mtd->dev.type = &mtd_master_devtype;
+ mtd->dev.class = &mtd_master_class;
+ mtd->dev.devt = MKDEV(MAJOR(mtd_master_devt), i);
+ error = dev_set_name(&mtd->dev, "mtd_master%d", i);
+ }
if (error)
goto fail_devname;
dev_set_drvdata(&mtd->dev, mtd);
@@ -749,6 +787,7 @@ int add_mtd_device(struct mtd_info *mtd)
of_node_get(mtd_get_of_node(mtd));
error = device_register(&mtd->dev);
if (error) {
+ pr_err("mtd: %s device_register fail %d\n", mtd->name, error);
put_device(&mtd->dev);
goto fail_added;
}
@@ -760,10 +799,13 @@ int add_mtd_device(struct mtd_info *mtd)
mtd_debugfs_populate(mtd);
- device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
- "mtd%dro", i);
+ if (partitioned) {
+ device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
+ "mtd%dro", i);
+ }
- pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
+ pr_debug("mtd: Giving out %spartitioned device %d to %s\n",
+ partitioned ? "" : "un-", i, mtd->name);
/* No need to get a refcount on the module containing
the notifier, since we hold the mtd_table_mutex */
list_for_each_entry(not, &mtd_notifiers, list)
@@ -771,13 +813,16 @@ int add_mtd_device(struct mtd_info *mtd)
mutex_unlock(&mtd_table_mutex);
- if (of_property_read_bool(mtd_get_of_node(mtd), "linux,rootfs")) {
- if (IS_BUILTIN(CONFIG_MTD)) {
- pr_info("mtd: setting mtd%d (%s) as root device\n", mtd->index, mtd->name);
- ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
- } else {
- pr_warn("mtd: can't set mtd%d (%s) as root device - mtd must be builtin\n",
- mtd->index, mtd->name);
+ if (partitioned) {
+ if (of_property_read_bool(mtd_get_of_node(mtd), "linux,rootfs")) {
+ if (IS_BUILTIN(CONFIG_MTD)) {
+ pr_info("mtd: setting mtd%d (%s) as root device\n",
+ mtd->index, mtd->name);
+ ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
+ } else {
+ pr_warn("mtd: can't set mtd%d (%s) as root device - mtd must be builtin\n",
+ mtd->index, mtd->name);
+ }
}
}
@@ -793,7 +838,10 @@ fail_nvmem_add:
fail_added:
of_node_put(mtd_get_of_node(mtd));
fail_devname:
- idr_remove(&mtd_idr, i);
+ if (partitioned)
+ idr_remove(&mtd_idr, i);
+ else
+ idr_remove(&mtd_master_idr, i);
fail_locked:
mutex_unlock(&mtd_table_mutex);
return error;
@@ -811,12 +859,14 @@ fail_locked:
int del_mtd_device(struct mtd_info *mtd)
{
- int ret;
struct mtd_notifier *not;
+ struct idr *idr;
+ int ret;
mutex_lock(&mtd_table_mutex);
- if (idr_find(&mtd_idr, mtd->index) != mtd) {
+ idr = mtd->dev.class == &mtd_class ? &mtd_idr : &mtd_master_idr;
+ if (idr_find(idr, mtd->index) != mtd) {
ret = -ENODEV;
goto out_error;
}
@@ -1056,6 +1106,7 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
const struct mtd_partition *parts,
int nr_parts)
{
+ struct mtd_info *parent;
int ret, err;
mtd_set_dev_defaults(mtd);
@@ -1064,25 +1115,30 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
if (ret)
goto out;
+ ret = add_mtd_device(mtd, false);
+ if (ret)
+ goto out;
+
if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
- ret = add_mtd_device(mtd);
+ ret = mtd_add_partition(mtd, mtd->name, 0, MTDPART_SIZ_FULL, &parent);
if (ret)
goto out;
+
+ } else {
+ parent = mtd;
}
/* Prefer parsed partitions over driver-provided fallback */
- ret = parse_mtd_partitions(mtd, types, parser_data);
+ ret = parse_mtd_partitions(parent, types, parser_data);
if (ret == -EPROBE_DEFER)
goto out;
if (ret > 0)
ret = 0;
else if (nr_parts)
- ret = add_mtd_partitions(mtd, parts, nr_parts);
- else if (!device_is_registered(&mtd->dev))
- ret = add_mtd_device(mtd);
- else
- ret = 0;
+ ret = add_mtd_partitions(parent, parts, nr_parts);
+ else if (!IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
+ ret = mtd_add_partition(parent, mtd->name, 0, MTDPART_SIZ_FULL, NULL);
if (ret)
goto out;
@@ -1102,13 +1158,14 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
register_reboot_notifier(&mtd->reboot_notifier);
}
+ return 0;
out:
- if (ret) {
- nvmem_unregister(mtd->otp_user_nvmem);
- nvmem_unregister(mtd->otp_factory_nvmem);
- }
+ nvmem_unregister(mtd->otp_user_nvmem);
+ nvmem_unregister(mtd->otp_factory_nvmem);
- if (ret && device_is_registered(&mtd->dev)) {
+ del_mtd_partitions(mtd);
+
+ if (device_is_registered(&mtd->dev)) {
err = del_mtd_device(mtd);
if (err)
pr_err("Error when deleting MTD device (%d)\n", err);
@@ -1267,8 +1324,7 @@ int __get_mtd_device(struct mtd_info *mtd)
mtd = mtd->parent;
}
- if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
- kref_get(&master->refcnt);
+ kref_get(&master->refcnt);
return 0;
}
@@ -1362,8 +1418,7 @@ void __put_mtd_device(struct mtd_info *mtd)
mtd = parent;
}
- if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
- kref_put(&master->refcnt, mtd_device_release);
+ kref_put(&master->refcnt, mtd_device_release);
module_put(master->owner);
@@ -2530,6 +2585,16 @@ static int __init init_mtd(void)
if (ret)
goto err_reg;
+ ret = class_register(&mtd_master_class);
+ if (ret)
+ goto err_reg2;
+
+ ret = alloc_chrdev_region(&mtd_master_devt, 0, MTD_MASTER_DEVS, "mtd_master");
+ if (ret < 0) {
+ pr_err("unable to allocate char dev region\n");
+ goto err_chrdev;
+ }
+
mtd_bdi = mtd_bdi_init("mtd");
if (IS_ERR(mtd_bdi)) {
ret = PTR_ERR(mtd_bdi);
@@ -2554,6 +2619,10 @@ out_procfs:
bdi_unregister(mtd_bdi);
bdi_put(mtd_bdi);
err_bdi:
+ unregister_chrdev_region(mtd_master_devt, MTD_MASTER_DEVS);
+err_chrdev:
+ class_unregister(&mtd_master_class);
+err_reg2:
class_unregister(&mtd_class);
err_reg:
pr_err("Error registering mtd class or bdi: %d\n", ret);
@@ -2567,9 +2636,12 @@ static void __exit cleanup_mtd(void)
if (proc_mtd)
remove_proc_entry("mtd", NULL);
class_unregister(&mtd_class);
+ class_unregister(&mtd_master_class);
+ unregister_chrdev_region(mtd_master_devt, MTD_MASTER_DEVS);
bdi_unregister(mtd_bdi);
bdi_put(mtd_bdi);
idr_destroy(&mtd_idr);
+ idr_destroy(&mtd_master_idr);
}
module_init(init_mtd);
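
The mtdcore.c rework keeps two parallel ID namespaces: mtd_idr for partitioned
("mtd%d") devices and mtd_master_idr for the new "mtd_master%d" devices, so the
two index spaces cannot collide. The allocation pattern used in
add_mtd_device() boils down to this sketch (simplified; mtd_table_mutex not
shown, names hypothetical):

	#include <linux/idr.h>

	static DEFINE_IDR(example_idr);

	static int example_alloc_id(void *ptr, int preferred)
	{
		/* honour a DT-provided alias if there is one ... */
		if (preferred >= 0)
			return idr_alloc(&example_idr, ptr, preferred,
					 preferred + 1, GFP_KERNEL);
		/* ... otherwise take the lowest free id */
		return idr_alloc(&example_idr, ptr, 0, 0, GFP_KERNEL);
	}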
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h
index b014861a06a6..2258d31c5aa6 100644
--- a/drivers/mtd/mtdcore.h
+++ b/drivers/mtd/mtdcore.h
@@ -8,7 +8,7 @@ extern struct mutex mtd_table_mutex;
extern struct backing_dev_info *mtd_bdi;
struct mtd_info *__mtd_next_device(int i);
-int __must_check add_mtd_device(struct mtd_info *mtd);
+int __must_check add_mtd_device(struct mtd_info *mtd, bool partitioned);
int del_mtd_device(struct mtd_info *mtd);
int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
int del_mtd_partitions(struct mtd_info *);
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 994e8c51e674..5a3db36d734e 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -86,8 +86,7 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent,
* parent conditional on that option. Note, this is a way to
* distinguish between the parent and its partitions in sysfs.
*/
- child->dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) || mtd_is_partition(parent) ?
- &parent->dev : parent->dev.parent;
+ child->dev.parent = &parent->dev;
child->dev.of_node = part->of_node;
child->parent = parent;
child->part.offset = part->offset;
@@ -243,7 +242,7 @@ static int mtd_add_partition_attrs(struct mtd_info *new)
}
int mtd_add_partition(struct mtd_info *parent, const char *name,
- long long offset, long long length)
+ long long offset, long long length, struct mtd_info **out)
{
struct mtd_info *master = mtd_get_master(parent);
u64 parent_size = mtd_is_partition(parent) ?
@@ -276,12 +275,15 @@ int mtd_add_partition(struct mtd_info *parent, const char *name,
list_add_tail(&child->part.node, &parent->partitions);
mutex_unlock(&master->master.partitions_lock);
- ret = add_mtd_device(child);
+ ret = add_mtd_device(child, true);
if (ret)
goto err_remove_part;
mtd_add_partition_attrs(child);
+ if (out)
+ *out = child;
+
return 0;
err_remove_part:
@@ -413,7 +415,7 @@ int add_mtd_partitions(struct mtd_info *parent,
list_add_tail(&child->part.node, &parent->partitions);
mutex_unlock(&master->master.partitions_lock);
- ret = add_mtd_device(child);
+ ret = add_mtd_device(child, true);
if (ret) {
mutex_lock(&master->master.partitions_lock);
list_del(&child->part.node);
@@ -590,9 +592,6 @@ static int mtd_part_of_parse(struct mtd_info *master,
int ret, err = 0;
dev = &master->dev;
- /* Use parent device (controller) if the top level MTD is not registered */
- if (!IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) && !mtd_is_partition(master))
- dev = master->dev.parent;
np = mtd_get_of_node(master);
if (mtd_is_partition(master))
@@ -711,6 +710,7 @@ int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
if (ret < 0 && !err)
err = ret;
}
+
return err;
}
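
mtd_add_partition() gains an optional out parameter so that
mtd_device_parse_register() can obtain the full-device partition it just
created and hand it to the partition parsers. A sketch of the calling
convention, with example_register() as a hypothetical caller:

	static int example_register(struct mtd_info *master)
	{
		struct mtd_info *child;
		int ret;

		ret = mtd_add_partition(master, master->name, 0,
					MTDPART_SIZ_FULL, &child);
		if (ret)
			return ret;

		/* child is the registered full-device partition; callers
		 * that don't need it (e.g. mtdchar's BLKPG ioctl) pass
		 * NULL instead
		 */
		return 0;
	}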
diff --git a/drivers/mtd/nand/ecc-mxic.c b/drivers/mtd/nand/ecc-mxic.c
index 56b56f726b99..1bf9a5a64b87 100644
--- a/drivers/mtd/nand/ecc-mxic.c
+++ b/drivers/mtd/nand/ecc-mxic.c
@@ -614,7 +614,7 @@ static int mxic_ecc_finish_io_req_external(struct nand_device *nand,
{
struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
- int nents, step, ret;
+ int nents, step, ret = 0;
if (req->mode == MTD_OPS_RAW)
return 0;
diff --git a/drivers/mtd/nand/qpic_common.c b/drivers/mtd/nand/qpic_common.c
index e0ed25b5afea..4dc4d65e7d32 100644
--- a/drivers/mtd/nand/qpic_common.c
+++ b/drivers/mtd/nand/qpic_common.c
@@ -236,21 +236,21 @@ int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
int i, ret;
struct bam_cmd_element *bam_ce_buffer;
struct bam_transaction *bam_txn = nandc->bam_txn;
+ u32 offset;
bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
/* fill the command desc */
for (i = 0; i < size; i++) {
+ offset = nandc->props->bam_offset + reg_off + 4 * i;
if (read)
bam_prep_ce(&bam_ce_buffer[i],
- nandc_reg_phys(nandc, reg_off + 4 * i),
- BAM_READ_COMMAND,
+ offset, BAM_READ_COMMAND,
reg_buf_dma_addr(nandc,
(__le32 *)vaddr + i));
else
bam_prep_ce_le32(&bam_ce_buffer[i],
- nandc_reg_phys(nandc, reg_off + 4 * i),
- BAM_WRITE_COMMAND,
+ offset, BAM_WRITE_COMMAND,
*((__le32 *)vaddr + i));
}
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index b8035df8f732..4b99d9c422c3 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -34,7 +34,7 @@ config MTD_NAND_DENALI_DT
config MTD_NAND_AMS_DELTA
tristate "Amstrad E3 NAND controller"
depends on MACH_AMS_DELTA || COMPILE_TEST
- default y
+ default MACH_AMS_DELTA
help
Support for NAND flash on Amstrad E3 (Delta).
@@ -462,6 +462,13 @@ config MTD_NAND_NUVOTON_MA35
Enables support for the NAND controller found on
the Nuvoton MA35 series SoCs.
+config MTD_NAND_LOONGSON1
+ tristate "Loongson1 NAND controller"
+ depends on LOONGSON1_APB_DMA || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ Enables support for NAND controller on Loongson1 SoCs.
+
comment "Misc"
config MTD_SM_COMMON
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
index 99e79c448847..711d043ad4f8 100644
--- a/drivers/mtd/nand/raw/Makefile
+++ b/drivers/mtd/nand/raw/Makefile
@@ -59,6 +59,7 @@ obj-$(CONFIG_MTD_NAND_ROCKCHIP) += rockchip-nand-controller.o
obj-$(CONFIG_MTD_NAND_PL35X) += pl35x-nand-controller.o
obj-$(CONFIG_MTD_NAND_RENESAS) += renesas-nand-controller.o
obj-$(CONFIG_MTD_NAND_NUVOTON_MA35) += nuvoton-ma35d1-nand-controller.o
+obj-$(CONFIG_MTD_NAND_LOONGSON1) += loongson1-nand-controller.o
nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o
nand-objs += nand_onfi.o
diff --git a/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
index 6487dfc64258..e532c3535b16 100644
--- a/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
+++ b/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
@@ -171,6 +171,7 @@ static void bcm47xxnflash_ops_bcm4706_cmd_ctrl(struct nand_chip *nand_chip,
{
struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
u32 code = 0;
+ int rc;
if (cmd == NAND_CMD_NONE)
return;
@@ -182,7 +183,9 @@ static void bcm47xxnflash_ops_bcm4706_cmd_ctrl(struct nand_chip *nand_chip,
if (cmd != NAND_CMD_RESET)
code |= NCTL_CSA;
- bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, code);
+ rc = bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, code);
+ if (rc)
+ pr_err("ctl_cmd didn't work with error %d\n", rc);
}
/* Default nand_select_chip calls cmd_ctrl, which is not used in BCM4706 */
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index 17f6d9723df9..62bdda3be92f 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -65,6 +65,7 @@ module_param(wp_on, int, 0444);
#define CMD_PARAMETER_READ 0x0e
#define CMD_PARAMETER_CHANGE_COL 0x0f
#define CMD_LOW_LEVEL_OP 0x10
+#define CMD_NOT_SUPPORTED 0xff
struct brcm_nand_dma_desc {
u32 next_desc;
@@ -101,7 +102,7 @@ struct brcm_nand_dma_desc {
#define BRCMNAND_MIN_DEVSIZE (4ULL * 1024 * 1024)
#define NAND_CTRL_RDY (INTFC_CTLR_READY | INTFC_FLASH_READY)
-#define NAND_POLL_STATUS_TIMEOUT_MS 100
+#define NAND_POLL_STATUS_TIMEOUT_MS 500
#define EDU_CMD_WRITE 0x00
#define EDU_CMD_READ 0x01
@@ -199,6 +200,30 @@ static const u16 flash_dma_regs_v4[] = {
[FLASH_DMA_CURRENT_DESC_EXT] = 0x34,
};
+/* Native command conversion for legacy controllers (< v5.0) */
+static const u8 native_cmd_conv[] = {
+ [NAND_CMD_READ0] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_READ1] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_RNDOUT] = CMD_PARAMETER_CHANGE_COL,
+ [NAND_CMD_PAGEPROG] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_READOOB] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_ERASE1] = CMD_BLOCK_ERASE,
+ [NAND_CMD_STATUS] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_SEQIN] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_RNDIN] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_READID] = CMD_DEVICE_ID_READ,
+ [NAND_CMD_ERASE2] = CMD_NULL,
+ [NAND_CMD_PARAM] = CMD_PARAMETER_READ,
+ [NAND_CMD_GET_FEATURES] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_SET_FEATURES] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_RESET] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_READSTART] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_READCACHESEQ] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_READCACHEEND] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_RNDOUTSTART] = CMD_NULL,
+ [NAND_CMD_CACHEDPROG] = CMD_NOT_SUPPORTED,
+};
+
/* Controller feature flags */
enum {
BRCMNAND_HAS_1K_SECTORS = BIT(0),
@@ -237,6 +262,12 @@ struct brcmnand_controller {
/* List of NAND hosts (one for each chip-select) */
struct list_head host_list;
+ /* Functions to be called from exec_op */
+ int (*check_instr)(struct nand_chip *chip,
+ const struct nand_operation *op);
+ int (*exec_instr)(struct nand_chip *chip,
+ const struct nand_operation *op);
+
/* EDU info, per-transaction */
const u16 *edu_offsets;
void __iomem *edu_base;
@@ -310,9 +341,6 @@ struct brcmnand_host {
struct platform_device *pdev;
int cs;
- unsigned int last_cmd;
- unsigned int last_byte;
- u64 last_addr;
struct brcmnand_cfg hwcfg;
struct brcmnand_controller *ctrl;
};
@@ -2233,14 +2261,11 @@ static int brcmnand_read_page(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- struct brcmnand_host *host = nand_get_controller_data(chip);
u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
u64 addr = (u64)page << chip->page_shift;
- host->last_addr = addr;
-
- return brcmnand_read(mtd, chip, host->last_addr,
- mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
+ return brcmnand_read(mtd, chip, addr, mtd->writesize >> FC_SHIFT,
+ (u32 *)buf, oob);
}
static int brcmnand_read_page_raw(struct nand_chip *chip, uint8_t *buf,
@@ -2252,11 +2277,9 @@ static int brcmnand_read_page_raw(struct nand_chip *chip, uint8_t *buf,
int ret;
u64 addr = (u64)page << chip->page_shift;
- host->last_addr = addr;
-
brcmnand_set_ecc_enabled(host, 0);
- ret = brcmnand_read(mtd, chip, host->last_addr,
- mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
+ ret = brcmnand_read(mtd, chip, addr, mtd->writesize >> FC_SHIFT,
+ (u32 *)buf, oob);
brcmnand_set_ecc_enabled(host, 1);
return ret;
}
@@ -2363,13 +2386,10 @@ static int brcmnand_write_page(struct nand_chip *chip, const uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- struct brcmnand_host *host = nand_get_controller_data(chip);
void *oob = oob_required ? chip->oob_poi : NULL;
u64 addr = (u64)page << chip->page_shift;
- host->last_addr = addr;
-
- return brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
+ return brcmnand_write(mtd, chip, addr, (const u32 *)buf, oob);
}
static int brcmnand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
@@ -2381,9 +2401,8 @@ static int brcmnand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
u64 addr = (u64)page << chip->page_shift;
int ret = 0;
- host->last_addr = addr;
brcmnand_set_ecc_enabled(host, 0);
- ret = brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
+ ret = brcmnand_write(mtd, chip, addr, (const u32 *)buf, oob);
brcmnand_set_ecc_enabled(host, 1);
return ret;
@@ -2490,18 +2509,190 @@ static int brcmnand_op_is_reset(const struct nand_operation *op)
return 0;
}
+static int brcmnand_check_instructions(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ return 0;
+}
+
+static int brcmnand_exec_instructions(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < op->ninstrs; i++) {
+ ret = brcmnand_exec_instr(host, i, op);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int brcmnand_check_instructions_legacy(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ const struct nand_op_instr *instr;
+ unsigned int i;
+ u8 cmd;
+
+ for (i = 0; i < op->ninstrs; i++) {
+ instr = &op->instrs[i];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ cmd = native_cmd_conv[instr->ctx.cmd.opcode];
+ if (cmd == CMD_NOT_SUPPORTED)
+ return -EOPNOTSUPP;
+ break;
+ case NAND_OP_ADDR_INSTR:
+ case NAND_OP_DATA_IN_INSTR:
+ case NAND_OP_WAITRDY_INSTR:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int brcmnand_exec_instructions_legacy(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_controller *ctrl = host->ctrl;
+ const struct nand_op_instr *instr;
+ unsigned int i, j;
+ u8 cmd = CMD_NULL, last_cmd = CMD_NULL;
+ int ret = 0;
+ u64 last_addr;
+
+ for (i = 0; i < op->ninstrs; i++) {
+ instr = &op->instrs[i];
+
+ if (instr->type == NAND_OP_CMD_INSTR) {
+ cmd = native_cmd_conv[instr->ctx.cmd.opcode];
+ if (cmd == CMD_NOT_SUPPORTED) {
+ dev_err(ctrl->dev, "unsupported cmd=%d\n",
+ instr->ctx.cmd.opcode);
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ } else if (instr->type == NAND_OP_ADDR_INSTR) {
+ u64 addr = 0;
+
+ if (cmd == CMD_NULL)
+ continue;
+
+ if (instr->ctx.addr.naddrs > 8) {
+ dev_err(ctrl->dev, "unsupported naddrs=%u\n",
+ instr->ctx.addr.naddrs);
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ for (j = 0; j < instr->ctx.addr.naddrs; j++)
+ addr |= (instr->ctx.addr.addrs[j]) << (j << 3);
+
+ if (cmd == CMD_BLOCK_ERASE)
+ addr <<= chip->page_shift;
+ else if (cmd == CMD_PARAMETER_CHANGE_COL)
+ addr &= ~((u64)(FC_BYTES - 1));
+
+ brcmnand_set_cmd_addr(mtd, addr);
+ brcmnand_send_cmd(host, cmd);
+ last_addr = addr;
+ last_cmd = cmd;
+ cmd = CMD_NULL;
+ brcmnand_waitfunc(chip);
+
+ if (last_cmd == CMD_PARAMETER_READ ||
+ last_cmd == CMD_PARAMETER_CHANGE_COL) {
+ /* Copy flash cache word-wise */
+ u32 *flash_cache = (u32 *)ctrl->flash_cache;
+
+ brcmnand_soc_data_bus_prepare(ctrl->soc, true);
+
+ /*
+ * Must cache the FLASH_CACHE now, since changes in
+ * SECTOR_SIZE_1K may invalidate it
+ */
+ for (j = 0; j < FC_WORDS; j++)
+ /*
+ * Flash cache is big endian for parameter pages, at
+ * least on STB SoCs
+ */
+ flash_cache[j] = be32_to_cpu(brcmnand_read_fc(ctrl, j));
+
+ brcmnand_soc_data_bus_unprepare(ctrl->soc, true);
+ }
+ } else if (instr->type == NAND_OP_DATA_IN_INSTR) {
+ u8 *in = instr->ctx.data.buf.in;
+
+ if (last_cmd == CMD_DEVICE_ID_READ) {
+ u32 val;
+
+ if (instr->ctx.data.len > 8) {
+ dev_err(ctrl->dev, "unsupported len=%u\n",
+ instr->ctx.data.len);
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ for (j = 0; j < instr->ctx.data.len; j++) {
+ if (j == 0)
+ val = brcmnand_read_reg(ctrl, BRCMNAND_ID);
+ else if (j == 4)
+ val = brcmnand_read_reg(ctrl, BRCMNAND_ID_EXT);
+
+ in[j] = (val >> (24 - ((j % 4) << 3))) & 0xff;
+ }
+ } else if (last_cmd == CMD_PARAMETER_READ ||
+ last_cmd == CMD_PARAMETER_CHANGE_COL) {
+ u64 addr;
+ u32 offs;
+
+ for (j = 0; j < instr->ctx.data.len; j++) {
+ addr = last_addr + j;
+ offs = addr & (FC_BYTES - 1);
+
+ if (j > 0 && offs == 0)
+ nand_change_read_column_op(chip, addr, NULL, 0,
+ false);
+
+ in[j] = ctrl->flash_cache[offs];
+ }
+ }
+ } else if (instr->type == NAND_OP_WAITRDY_INSTR) {
+ ret = bcmnand_ctrl_poll_status(host, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
+ if (ret)
+ break;
+ } else {
+ dev_err(ctrl->dev, "unsupported instruction type: %d\n", instr->type);
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ }
+
+ return ret;
+}
+
static int brcmnand_exec_op(struct nand_chip *chip,
const struct nand_operation *op,
bool check_only)
{
struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_controller *ctrl = host->ctrl;
struct mtd_info *mtd = nand_to_mtd(chip);
u8 *status;
- unsigned int i;
int ret = 0;
if (check_only)
- return 0;
+ return ctrl->check_instr(chip, op);
if (brcmnand_op_is_status(op)) {
status = op->instrs[1].ctx.data.buf.in;
@@ -2525,11 +2716,7 @@ static int brcmnand_exec_op(struct nand_chip *chip,
if (op->deassert_wp)
brcmnand_wp(mtd, 0);
- for (i = 0; i < op->ninstrs; i++) {
- ret = brcmnand_exec_instr(host, i, op);
- if (ret)
- break;
- }
+ ret = ctrl->exec_instr(chip, op);
if (op->deassert_wp)
brcmnand_wp(mtd, 1);
@@ -3142,6 +3329,15 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
if (ret)
goto err;
+ /* Only v5.0+ controllers have low level ops support */
+ if (ctrl->nand_version >= 0x0500) {
+ ctrl->check_instr = brcmnand_check_instructions;
+ ctrl->exec_instr = brcmnand_exec_instructions;
+ } else {
+ ctrl->check_instr = brcmnand_check_instructions_legacy;
+ ctrl->exec_instr = brcmnand_exec_instructions_legacy;
+ }
+
/*
* Most chips have this cache at a fixed offset within 'nand' block.
* Some must specify this region separately.
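
For pre-v5.0 controllers, the new brcmnand exec_op path funnels every opcode
through native_cmd_conv[], with CMD_NOT_SUPPORTED (0xff) marking commands the
hardware cannot express, so check_instr can veto an operation before anything
is issued. A minimal sketch of that lookup, with a bounds check added here for
illustration:

	static int example_convert_opcode(u8 opcode)
	{
		u8 cmd;

		if (opcode >= ARRAY_SIZE(native_cmd_conv))
			return -EOPNOTSUPP;

		cmd = native_cmd_conv[opcode];
		if (cmd == CMD_NOT_SUPPORTED)
			return -EOPNOTSUPP;

		return cmd;	/* a native CMD_* value, possibly CMD_NULL */
	}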
diff --git a/drivers/mtd/nand/raw/denali_pci.c b/drivers/mtd/nand/raw/denali_pci.c
index e22094e39546..97fa32d73441 100644
--- a/drivers/mtd/nand/raw/denali_pci.c
+++ b/drivers/mtd/nand/raw/denali_pci.c
@@ -68,7 +68,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
denali->clk_rate = 50000000; /* 50 MHz */
denali->clk_x_rate = 200000000; /* 200 MHz */
- ret = pci_request_regions(dev, DENALI_NAND_NAME);
+ ret = pcim_request_all_regions(dev, DENALI_NAND_NAME);
if (ret) {
dev_err(&dev->dev, "Spectra: Unable to request memory regions\n");
return ret;
@@ -77,20 +77,18 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
denali->reg = devm_ioremap(denali->dev, csr_base, csr_len);
if (!denali->reg) {
dev_err(&dev->dev, "Spectra: Unable to remap memory region\n");
- ret = -ENOMEM;
- goto regions_release;
+ return -ENOMEM;
}
denali->host = devm_ioremap(denali->dev, mem_base, mem_len);
if (!denali->host) {
dev_err(&dev->dev, "Spectra: ioremap failed!");
- ret = -ENOMEM;
- goto regions_release;
+ return -ENOMEM;
}
ret = denali_init(denali);
if (ret)
- goto regions_release;
+ return ret;
nsels = denali->nbanks;
@@ -118,8 +116,6 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
out_remove_denali:
denali_remove(denali);
-regions_release:
- pci_release_regions(dev);
return ret;
}
@@ -127,7 +123,6 @@ static void denali_pci_remove(struct pci_dev *dev)
{
struct denali_controller *denali = pci_get_drvdata(dev);
- pci_release_regions(dev);
denali_remove(denali);
}
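
The denali_pci conversion replaces pci_request_regions() with
pcim_request_all_regions(), the device-managed variant, which is what lets the
goto-based release paths and the pci_release_regions() call in remove() be
deleted. A sketch of the resulting probe shape (example names are
hypothetical):

	static int example_pci_probe(struct pci_dev *pdev,
				     const struct pci_device_id *id)
	{
		int ret;

		ret = pcim_enable_device(pdev);
		if (ret)
			return ret;

		ret = pcim_request_all_regions(pdev, "example");
		if (ret)
			return ret;

		/* plain returns from here on: devres releases the
		 * regions and disables the device on detach
		 */
		return 0;
	}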
diff --git a/drivers/mtd/nand/raw/loongson1-nand-controller.c b/drivers/mtd/nand/raw/loongson1-nand-controller.c
new file mode 100644
index 000000000000..ef8e4f9ce287
--- /dev/null
+++ b/drivers/mtd/nand/raw/loongson1-nand-controller.c
@@ -0,0 +1,836 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * NAND Controller Driver for Loongson-1 SoC
+ *
+ * Copyright (C) 2015-2025 Keguang Zhang <keguang.zhang@gmail.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/sizes.h>
+
+/* Loongson-1 NAND Controller Registers */
+#define LS1X_NAND_CMD 0x0
+#define LS1X_NAND_ADDR1 0x4
+#define LS1X_NAND_ADDR2 0x8
+#define LS1X_NAND_TIMING 0xc
+#define LS1X_NAND_IDL 0x10
+#define LS1X_NAND_IDH_STATUS 0x14
+#define LS1X_NAND_PARAM 0x18
+#define LS1X_NAND_OP_NUM 0x1c
+
+/* NAND Command Register Bits */
+#define LS1X_NAND_CMD_OP_DONE BIT(10)
+#define LS1X_NAND_CMD_OP_SPARE BIT(9)
+#define LS1X_NAND_CMD_OP_MAIN BIT(8)
+#define LS1X_NAND_CMD_STATUS BIT(7)
+#define LS1X_NAND_CMD_RESET BIT(6)
+#define LS1X_NAND_CMD_READID BIT(5)
+#define LS1X_NAND_CMD_BLOCKS_ERASE BIT(4)
+#define LS1X_NAND_CMD_ERASE BIT(3)
+#define LS1X_NAND_CMD_WRITE BIT(2)
+#define LS1X_NAND_CMD_READ BIT(1)
+#define LS1X_NAND_CMD_VALID BIT(0)
+
+#define LS1X_NAND_WAIT_CYCLE_MASK GENMASK(7, 0)
+#define LS1X_NAND_HOLD_CYCLE_MASK GENMASK(15, 8)
+#define LS1X_NAND_CELL_SIZE_MASK GENMASK(11, 8)
+
+#define LS1X_NAND_COL_ADDR_CYC 2U
+#define LS1X_NAND_MAX_ADDR_CYC 5U
+
+#define BITS_PER_WORD (4 * BITS_PER_BYTE)
+
+struct ls1x_nand_host;
+
+struct ls1x_nand_op {
+ char addrs[LS1X_NAND_MAX_ADDR_CYC];
+ unsigned int naddrs;
+ unsigned int addrs_offset;
+ unsigned int aligned_offset;
+ unsigned int cmd_reg;
+ unsigned int row_start;
+ unsigned int rdy_timeout_ms;
+ unsigned int orig_len;
+ bool is_readid;
+ bool is_erase;
+ bool is_write;
+ bool is_read;
+ bool is_change_column;
+ size_t len;
+ char *buf;
+};
+
+struct ls1x_nand_data {
+ unsigned int status_field;
+ unsigned int op_scope_field;
+ unsigned int hold_cycle;
+ unsigned int wait_cycle;
+ void (*set_addr)(struct ls1x_nand_host *host, struct ls1x_nand_op *op);
+};
+
+struct ls1x_nand_host {
+ struct device *dev;
+ struct nand_chip chip;
+ struct nand_controller controller;
+ const struct ls1x_nand_data *data;
+ void __iomem *reg_base;
+ struct regmap *regmap;
+ /* DMA Engine stuff */
+ dma_addr_t dma_base;
+ struct dma_chan *dma_chan;
+ dma_cookie_t dma_cookie;
+ struct completion dma_complete;
+};
+
+static const struct regmap_config ls1x_nand_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static int ls1x_nand_op_cmd_mapping(struct nand_chip *chip, struct ls1x_nand_op *op, u8 opcode)
+{
+ struct ls1x_nand_host *host = nand_get_controller_data(chip);
+
+ op->row_start = chip->page_shift + 1;
+
+ /* The controller abstracts the following NAND operations. */
+ switch (opcode) {
+ case NAND_CMD_STATUS:
+ op->cmd_reg = LS1X_NAND_CMD_STATUS;
+ break;
+ case NAND_CMD_RESET:
+ op->cmd_reg = LS1X_NAND_CMD_RESET;
+ break;
+ case NAND_CMD_READID:
+ op->is_readid = true;
+ op->cmd_reg = LS1X_NAND_CMD_READID;
+ break;
+ case NAND_CMD_ERASE1:
+ op->is_erase = true;
+ op->addrs_offset = LS1X_NAND_COL_ADDR_CYC;
+ break;
+ case NAND_CMD_ERASE2:
+ if (!op->is_erase)
+ return -EOPNOTSUPP;
+ /* During erasing, row_start differs from the default value. */
+ op->row_start = chip->page_shift;
+ op->cmd_reg = LS1X_NAND_CMD_ERASE;
+ break;
+ case NAND_CMD_SEQIN:
+ op->is_write = true;
+ break;
+ case NAND_CMD_PAGEPROG:
+ if (!op->is_write)
+ return -EOPNOTSUPP;
+ op->cmd_reg = LS1X_NAND_CMD_WRITE;
+ break;
+ case NAND_CMD_READ0:
+ op->is_read = true;
+ break;
+ case NAND_CMD_READSTART:
+ if (!op->is_read)
+ return -EOPNOTSUPP;
+ op->cmd_reg = LS1X_NAND_CMD_READ;
+ break;
+ case NAND_CMD_RNDOUT:
+ op->is_change_column = true;
+ break;
+ case NAND_CMD_RNDOUTSTART:
+ if (!op->is_change_column)
+ return -EOPNOTSUPP;
+ op->cmd_reg = LS1X_NAND_CMD_READ;
+ break;
+ default:
+ dev_dbg(host->dev, "unsupported opcode: %u\n", opcode);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int ls1x_nand_parse_instructions(struct nand_chip *chip,
+ const struct nand_subop *subop, struct ls1x_nand_op *op)
+{
+ unsigned int op_id;
+ int ret;
+
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ const struct nand_op_instr *instr = &subop->instrs[op_id];
+ unsigned int offset, naddrs;
+ const u8 *addrs;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ ret = ls1x_nand_op_cmd_mapping(chip, op, instr->ctx.cmd.opcode);
+ if (ret < 0)
+ return ret;
+
+ break;
+ case NAND_OP_ADDR_INSTR:
+ naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ if (naddrs > LS1X_NAND_MAX_ADDR_CYC)
+ return -EOPNOTSUPP;
+ op->naddrs = naddrs;
+ offset = nand_subop_get_addr_start_off(subop, op_id);
+ addrs = &instr->ctx.addr.addrs[offset];
+ memcpy(op->addrs + op->addrs_offset, addrs, naddrs);
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ case NAND_OP_DATA_OUT_INSTR:
+ offset = nand_subop_get_data_start_off(subop, op_id);
+ op->orig_len = nand_subop_get_data_len(subop, op_id);
+ if (instr->type == NAND_OP_DATA_IN_INSTR)
+ op->buf = instr->ctx.data.buf.in + offset;
+ else if (instr->type == NAND_OP_DATA_OUT_INSTR)
+ op->buf = (void *)instr->ctx.data.buf.out + offset;
+
+ break;
+ case NAND_OP_WAITRDY_INSTR:
+ op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static void ls1b_nand_set_addr(struct ls1x_nand_host *host, struct ls1x_nand_op *op)
+{
+ struct nand_chip *chip = &host->chip;
+ int i;
+
+ for (i = 0; i < LS1X_NAND_MAX_ADDR_CYC; i++) {
+ int shift, mask, val;
+
+ if (i < LS1X_NAND_COL_ADDR_CYC) {
+ shift = i * BITS_PER_BYTE;
+ mask = (u32)0xff << shift;
+ mask &= GENMASK(chip->page_shift, 0);
+ val = (u32)op->addrs[i] << shift;
+ regmap_update_bits(host->regmap, LS1X_NAND_ADDR1, mask, val);
+ } else if (!op->is_change_column) {
+ shift = op->row_start + (i - LS1X_NAND_COL_ADDR_CYC) * BITS_PER_BYTE;
+ mask = (u32)0xff << shift;
+ val = (u32)op->addrs[i] << shift;
+ regmap_update_bits(host->regmap, LS1X_NAND_ADDR1, mask, val);
+
+ if (i == 4) {
+ mask = (u32)0xff >> (BITS_PER_WORD - shift);
+ val = (u32)op->addrs[i] >> (BITS_PER_WORD - shift);
+ regmap_update_bits(host->regmap, LS1X_NAND_ADDR2, mask, val);
+ }
+ }
+ }
+}
+
+static void ls1c_nand_set_addr(struct ls1x_nand_host *host, struct ls1x_nand_op *op)
+{
+ int i;
+
+ for (i = 0; i < LS1X_NAND_MAX_ADDR_CYC; i++) {
+ int shift, mask, val;
+
+ if (i < LS1X_NAND_COL_ADDR_CYC) {
+ shift = i * BITS_PER_BYTE;
+ mask = (u32)0xff << shift;
+ val = (u32)op->addrs[i] << shift;
+ regmap_update_bits(host->regmap, LS1X_NAND_ADDR1, mask, val);
+ } else if (!op->is_change_column) {
+ shift = (i - LS1X_NAND_COL_ADDR_CYC) * BITS_PER_BYTE;
+ mask = (u32)0xff << shift;
+ val = (u32)op->addrs[i] << shift;
+ regmap_update_bits(host->regmap, LS1X_NAND_ADDR2, mask, val);
+ }
+ }
+}
+
+static void ls1x_nand_trigger_op(struct ls1x_nand_host *host, struct ls1x_nand_op *op)
+{
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int col0 = op->addrs[0];
+ short col;
+
+ if (!IS_ALIGNED(col0, chip->buf_align)) {
+ col0 = ALIGN_DOWN(op->addrs[0], chip->buf_align);
+ op->aligned_offset = op->addrs[0] - col0;
+ op->addrs[0] = col0;
+ }
+
+ if (host->data->set_addr)
+ host->data->set_addr(host, op);
+
+ /* set operation length */
+ if (op->is_write || op->is_read || op->is_change_column)
+ op->len = ALIGN(op->orig_len + op->aligned_offset, chip->buf_align);
+ else if (op->is_erase)
+ op->len = 1;
+ else
+ op->len = op->orig_len;
+
+ writel(op->len, host->reg_base + LS1X_NAND_OP_NUM);
+
+ /* set operation area and scope */
+ col = op->addrs[1] << BITS_PER_BYTE | op->addrs[0];
+ if (op->orig_len && !op->is_readid) {
+ unsigned int op_scope = 0;
+
+ if (col < mtd->writesize) {
+ op->cmd_reg |= LS1X_NAND_CMD_OP_MAIN;
+ op_scope = mtd->writesize;
+ }
+
+ op->cmd_reg |= LS1X_NAND_CMD_OP_SPARE;
+ op_scope += mtd->oobsize;
+
+ op_scope <<= __ffs(host->data->op_scope_field);
+ regmap_update_bits(host->regmap, LS1X_NAND_PARAM,
+ host->data->op_scope_field, op_scope);
+ }
+
+ /* set command */
+ writel(op->cmd_reg, host->reg_base + LS1X_NAND_CMD);
+
+ /* trigger operation */
+ regmap_write_bits(host->regmap, LS1X_NAND_CMD, LS1X_NAND_CMD_VALID, LS1X_NAND_CMD_VALID);
+}
+
+static int ls1x_nand_wait_for_op_done(struct ls1x_nand_host *host, struct ls1x_nand_op *op)
+{
+ unsigned int val;
+ int ret = 0;
+
+ if (op->rdy_timeout_ms) {
+ ret = regmap_read_poll_timeout(host->regmap, LS1X_NAND_CMD,
+ val, val & LS1X_NAND_CMD_OP_DONE,
+ 0, op->rdy_timeout_ms * MSEC_PER_SEC);
+ if (ret)
+ dev_err(host->dev, "operation failed\n");
+ }
+
+ return ret;
+}
+
+static void ls1x_nand_dma_callback(void *data)
+{
+ struct ls1x_nand_host *host = (struct ls1x_nand_host *)data;
+ struct dma_chan *chan = host->dma_chan;
+ struct device *dev = chan->device->dev;
+ enum dma_status status;
+
+ status = dmaengine_tx_status(chan, host->dma_cookie, NULL);
+ if (likely(status == DMA_COMPLETE)) {
+ dev_dbg(dev, "DMA complete with cookie=%d\n", host->dma_cookie);
+ complete(&host->dma_complete);
+ } else {
+ dev_err(dev, "DMA error with cookie=%d\n", host->dma_cookie);
+ }
+}
+
+static int ls1x_nand_dma_transfer(struct ls1x_nand_host *host, struct ls1x_nand_op *op)
+{
+ struct nand_chip *chip = &host->chip;
+ struct dma_chan *chan = host->dma_chan;
+ struct device *dev = chan->device->dev;
+ struct dma_async_tx_descriptor *desc;
+ enum dma_data_direction data_dir = op->is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ enum dma_transfer_direction xfer_dir = op->is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
+ void *buf = op->buf;
+ char *dma_buf = NULL;
+ dma_addr_t dma_addr;
+ int ret;
+
+ if (IS_ALIGNED((uintptr_t)buf, chip->buf_align) &&
+ IS_ALIGNED(op->orig_len, chip->buf_align)) {
+ dma_addr = dma_map_single(dev, buf, op->orig_len, data_dir);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err(dev, "failed to map DMA buffer\n");
+ return -ENXIO;
+ }
+ } else if (!op->is_write) {
+ dma_buf = dma_alloc_coherent(dev, op->len, &dma_addr, GFP_KERNEL);
+ if (!dma_buf)
+ return -ENOMEM;
+ } else {
+ dev_err(dev, "subpage writing not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ desc = dmaengine_prep_slave_single(chan, dma_addr, op->len, xfer_dir, DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(dev, "failed to prepare DMA descriptor\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ desc->callback = ls1x_nand_dma_callback;
+ desc->callback_param = host;
+
+ host->dma_cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(host->dma_cookie);
+ if (ret) {
+ dev_err(dev, "failed to submit DMA descriptor\n");
+ goto err;
+ }
+
+ dev_dbg(dev, "issue DMA with cookie=%d\n", host->dma_cookie);
+ dma_async_issue_pending(chan);
+
+ if (!wait_for_completion_timeout(&host->dma_complete, msecs_to_jiffies(1000))) {
+ dmaengine_terminate_sync(chan);
+ reinit_completion(&host->dma_complete);
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ if (dma_buf)
+ memcpy(buf, dma_buf + op->aligned_offset, op->orig_len);
+err:
+ if (dma_buf)
+ dma_free_coherent(dev, op->len, dma_buf, dma_addr);
+ else
+ dma_unmap_single(dev, dma_addr, op->orig_len, data_dir);
+
+ return ret;
+}
+
+static int ls1x_nand_data_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
+{
+ struct ls1x_nand_host *host = nand_get_controller_data(chip);
+ struct ls1x_nand_op op = {};
+ int ret;
+
+ ret = ls1x_nand_parse_instructions(chip, subop, &op);
+ if (ret)
+ return ret;
+
+ ls1x_nand_trigger_op(host, &op);
+
+ ret = ls1x_nand_dma_transfer(host, &op);
+ if (ret)
+ return ret;
+
+ return ls1x_nand_wait_for_op_done(host, &op);
+}
+
+static int ls1x_nand_misc_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop, struct ls1x_nand_op *op)
+{
+ struct ls1x_nand_host *host = nand_get_controller_data(chip);
+ int ret;
+
+ ret = ls1x_nand_parse_instructions(chip, subop, op);
+ if (ret)
+ return ret;
+
+ ls1x_nand_trigger_op(host, op);
+
+ return ls1x_nand_wait_for_op_done(host, op);
+}
+
+static int ls1x_nand_zerolen_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
+{
+ struct ls1x_nand_op op = {};
+
+ return ls1x_nand_misc_type_exec(chip, subop, &op);
+}
+
+static int ls1x_nand_read_id_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
+{
+ struct ls1x_nand_host *host = nand_get_controller_data(chip);
+ struct ls1x_nand_op op = {};
+ int i, ret;
+ union {
+ char ids[5];
+ struct {
+ int idl;
+ char idh;
+ };
+ } nand_id;
+
+ ret = ls1x_nand_misc_type_exec(chip, subop, &op);
+ if (ret)
+ return ret;
+
+ nand_id.idl = readl(host->reg_base + LS1X_NAND_IDL);
+ nand_id.idh = readb(host->reg_base + LS1X_NAND_IDH_STATUS);
+
+ for (i = 0; i < min(sizeof(nand_id.ids), op.orig_len); i++)
+ op.buf[i] = nand_id.ids[sizeof(nand_id.ids) - 1 - i];
+
+ return ret;
+}
+
+static int ls1x_nand_read_status_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
+{
+ struct ls1x_nand_host *host = nand_get_controller_data(chip);
+ struct ls1x_nand_op op = {};
+ int val, ret;
+
+ ret = ls1x_nand_misc_type_exec(chip, subop, &op);
+ if (ret)
+ return ret;
+
+ val = readl(host->reg_base + LS1X_NAND_IDH_STATUS);
+ val &= ~host->data->status_field;
+ op.buf[0] = val << ffs(host->data->status_field);
+
+ return ret;
+}
+
+static const struct nand_op_parser ls1x_nand_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(
+ ls1x_nand_read_id_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)),
+ NAND_OP_PARSER_PATTERN(
+ ls1x_nand_read_status_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
+ NAND_OP_PARSER_PATTERN(
+ ls1x_nand_zerolen_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ ls1x_nand_zerolen_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ ls1x_nand_data_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 0)),
+ NAND_OP_PARSER_PATTERN(
+ ls1x_nand_data_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 0),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+ );
+
+static int ls1x_nand_is_valid_cmd(u8 opcode)
+{
+ if (opcode == NAND_CMD_STATUS || opcode == NAND_CMD_RESET || opcode == NAND_CMD_READID)
+ return 0;
+
+ return -EOPNOTSUPP;
+}
+
+static int ls1x_nand_is_valid_cmd_seq(u8 opcode1, u8 opcode2)
+{
+ if (opcode1 == NAND_CMD_RNDOUT && opcode2 == NAND_CMD_RNDOUTSTART)
+ return 0;
+
+ if (opcode1 == NAND_CMD_READ0 && opcode2 == NAND_CMD_READSTART)
+ return 0;
+
+ if (opcode1 == NAND_CMD_ERASE1 && opcode2 == NAND_CMD_ERASE2)
+ return 0;
+
+ if (opcode1 == NAND_CMD_SEQIN && opcode2 == NAND_CMD_PAGEPROG)
+ return 0;
+
+ return -EOPNOTSUPP;
+}
+
+static int ls1x_nand_check_op(struct nand_chip *chip, const struct nand_operation *op)
+{
+ const struct nand_op_instr *instr1 = NULL, *instr2 = NULL;
+ int op_id;
+
+ for (op_id = 0; op_id < op->ninstrs; op_id++) {
+ const struct nand_op_instr *instr = &op->instrs[op_id];
+
+ if (instr->type == NAND_OP_CMD_INSTR) {
+ if (!instr1)
+ instr1 = instr;
+ else if (!instr2)
+ instr2 = instr;
+ else
+ break;
+ }
+ }
+
+ if (!instr1)
+ return -EOPNOTSUPP;
+
+ if (!instr2)
+ return ls1x_nand_is_valid_cmd(instr1->ctx.cmd.opcode);
+
+ return ls1x_nand_is_valid_cmd_seq(instr1->ctx.cmd.opcode, instr2->ctx.cmd.opcode);
+}
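+
+/*
+ * For example (editorial): an ERASE1(0x60) + ERASE2(0xd0) sequence passes
+ * ls1x_nand_is_valid_cmd_seq(), whereas an unsupported single command such
+ * as SET_FEATURES(0xef) falls through ls1x_nand_is_valid_cmd() and the
+ * whole operation is rejected with -EOPNOTSUPP.
+ */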
+
+static int ls1x_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op, bool check_only)
+{
+ if (check_only)
+ return ls1x_nand_check_op(chip, op);
+
+ return nand_op_parser_exec_op(chip, &ls1x_nand_op_parser, op, check_only);
+}
+
+static int ls1x_nand_attach_chip(struct nand_chip *chip)
+{
+ struct ls1x_nand_host *host = nand_get_controller_data(chip);
+ u64 chipsize = nanddev_target_size(&chip->base);
+ int cell_size = 0;
+
+ switch (chipsize) {
+ case SZ_128M:
+ cell_size = 0x0;
+ break;
+ case SZ_256M:
+ cell_size = 0x1;
+ break;
+ case SZ_512M:
+ cell_size = 0x2;
+ break;
+ case SZ_1G:
+ cell_size = 0x3;
+ break;
+ case SZ_2G:
+ cell_size = 0x4;
+ break;
+ case SZ_4G:
+ cell_size = 0x5;
+ break;
+ case SZ_8G:
+ cell_size = 0x6;
+ break;
+ case SZ_16G:
+ cell_size = 0x7;
+ break;
+ default:
+ dev_err(host->dev, "unsupported chip size: %llu MiB\n", chipsize >> 20);
+ return -EINVAL;
+ }
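+
+ /*
+ * Editorial note: the encoding above is equivalent to
+ * cell_size = ilog2(chipsize / SZ_128M), i.e. 128 MiB maps to 0 and each
+ * doubling adds one, up to 16 GiB -> 7; the switch simply keeps the
+ * supported sizes explicit.
+ */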
+
+ switch (chip->ecc.engine_type) {
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ break;
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set cell size */
+ regmap_update_bits(host->regmap, LS1X_NAND_PARAM, LS1X_NAND_CELL_SIZE_MASK,
+ FIELD_PREP(LS1X_NAND_CELL_SIZE_MASK, cell_size));
+
+ regmap_update_bits(host->regmap, LS1X_NAND_TIMING, LS1X_NAND_HOLD_CYCLE_MASK,
+ FIELD_PREP(LS1X_NAND_HOLD_CYCLE_MASK, host->data->hold_cycle));
+
+ regmap_update_bits(host->regmap, LS1X_NAND_TIMING, LS1X_NAND_WAIT_CYCLE_MASK,
+ FIELD_PREP(LS1X_NAND_WAIT_CYCLE_MASK, host->data->wait_cycle));
+
+ chip->ecc.read_page_raw = nand_monolithic_read_page_raw;
+ chip->ecc.write_page_raw = nand_monolithic_write_page_raw;
+
+ return 0;
+}
+
+static const struct nand_controller_ops ls1x_nand_controller_ops = {
+ .exec_op = ls1x_nand_exec_op,
+ .attach_chip = ls1x_nand_attach_chip,
+};
+
+static void ls1x_nand_controller_cleanup(struct ls1x_nand_host *host)
+{
+ if (host->dma_chan)
+ dma_release_channel(host->dma_chan);
+}
+
+static int ls1x_nand_controller_init(struct ls1x_nand_host *host)
+{
+ struct device *dev = host->dev;
+ struct dma_chan *chan;
+ struct dma_slave_config cfg = {};
+ int ret;
+
+ host->regmap = devm_regmap_init_mmio(dev, host->reg_base, &ls1x_nand_regmap_config);
+ if (IS_ERR(host->regmap))
+ return dev_err_probe(dev, PTR_ERR(host->regmap), "failed to init regmap\n");
+
+ chan = dma_request_chan(dev, "rxtx");
+ if (IS_ERR(chan))
+ return dev_err_probe(dev, PTR_ERR(chan), "failed to request DMA channel\n");
+ host->dma_chan = chan;
+
+ cfg.src_addr = host->dma_base;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr = host->dma_base;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ ret = dmaengine_slave_config(host->dma_chan, &cfg);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to config DMA channel\n");
+
+ init_completion(&host->dma_complete);
+
+ return 0;
+}
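+
+/*
+ * Editorial note: source and destination are programmed to the same
+ * "nand-dma" address because the controller presumably exposes a single
+ * 32-bit data FIFO serving both read and write transfers; only the
+ * transfer direction chosen when the descriptor is prepared differs.
+ */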
+
+static int ls1x_nand_chip_init(struct ls1x_nand_host *host)
+{
+ struct device *dev = host->dev;
+ int nchips = of_get_child_count(dev->of_node);
+ struct device_node *chip_np;
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ if (nchips != 1)
+ return dev_err_probe(dev, -EINVAL, "Only one NAND chip is currently supported\n");
+
+ chip_np = of_get_next_child(dev->of_node, NULL);
+ if (!chip_np)
+ return dev_err_probe(dev, -ENODEV, "failed to get child node for NAND chip\n");
+
+ nand_set_flash_node(chip, chip_np);
+ of_node_put(chip_np);
+ if (!mtd->name)
+ return dev_err_probe(dev, -EINVAL, "Missing MTD label\n");
+
+ nand_set_controller_data(chip, host);
+ chip->controller = &host->controller;
+ chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA | NAND_BROKEN_XD;
+ chip->buf_align = 16;
+ mtd->dev.parent = dev;
+ mtd->owner = THIS_MODULE;
+
+ ret = nand_scan(chip, 1);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to scan NAND chip\n");
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ nand_cleanup(chip);
+ return dev_err_probe(dev, ret, "failed to register MTD device\n");
+ }
+
+ return 0;
+}
+
+static int ls1x_nand_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct ls1x_nand_data *data;
+ struct ls1x_nand_host *host;
+ struct resource *res;
+ int ret;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -ENODEV;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ host->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(host->reg_base))
+ return PTR_ERR(host->reg_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-dma");
+ if (!res)
+ return dev_err_probe(dev, -EINVAL, "Missing 'nand-dma' in reg-names property\n");
+
+ host->dma_base = dma_map_resource(dev, res->start, resource_size(res),
+ DMA_BIDIRECTIONAL, 0);
+ if (dma_mapping_error(dev, host->dma_base))
+ return -ENXIO;
+
+ host->dev = dev;
+ host->data = data;
+ host->controller.ops = &ls1x_nand_controller_ops;
+
+ nand_controller_init(&host->controller);
+
+ ret = ls1x_nand_controller_init(host);
+ if (ret)
+ goto err;
+
+ ret = ls1x_nand_chip_init(host);
+ if (ret)
+ goto err;
+
+ platform_set_drvdata(pdev, host);
+
+ return 0;
+err:
+ ls1x_nand_controller_cleanup(host);
+
+ return ret;
+}
+
+static void ls1x_nand_remove(struct platform_device *pdev)
+{
+ struct ls1x_nand_host *host = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &host->chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ ls1x_nand_controller_cleanup(host);
+}
+
+static const struct ls1x_nand_data ls1b_nand_data = {
+ .status_field = GENMASK(15, 8),
+ .hold_cycle = 0x2,
+ .wait_cycle = 0xc,
+ .set_addr = ls1b_nand_set_addr,
+};
+
+static const struct ls1x_nand_data ls1c_nand_data = {
+ .status_field = GENMASK(23, 16),
+ .op_scope_field = GENMASK(29, 16),
+ .hold_cycle = 0x2,
+ .wait_cycle = 0xc,
+ .set_addr = ls1c_nand_set_addr,
+};
+
+static const struct of_device_id ls1x_nand_match[] = {
+ {
+ .compatible = "loongson,ls1b-nand-controller",
+ .data = &ls1b_nand_data,
+ },
+ {
+ .compatible = "loongson,ls1c-nand-controller",
+ .data = &ls1c_nand_data,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ls1x_nand_match);
+
+static struct platform_driver ls1x_nand_driver = {
+ .probe = ls1x_nand_probe,
+ .remove = ls1x_nand_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = ls1x_nand_match,
+ },
+};
+
+module_platform_driver(ls1x_nand_driver);
+
+MODULE_AUTHOR("Keguang Zhang <keguang.zhang@gmail.com>");
+MODULE_DESCRIPTION("Loongson-1 NAND Controller Driver");
+MODULE_LICENSE("GPL");
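+
+/*
+ * Hypothetical devicetree usage (editorial sketch; the node name, addresses
+ * and sizes are placeholders inferred from the probe path above: one MMIO
+ * register region, a second "nand-dma" region, an "rxtx" DMA channel and
+ * exactly one child chip node carrying an MTD label):
+ *
+ *	nand-controller@1fe78000 {
+ *		compatible = "loongson,ls1b-nand-controller";
+ *		reg = <0x1fe78000 0x24>, <0x1fe78040 0x4>;
+ *		reg-names = "nand", "nand-dma";
+ *		dmas = <&dma 0>;
+ *		dma-names = "rxtx";
+ *		#address-cells = <1>;
+ *		#size-cells = <0>;
+ *
+ *		nand@0 {
+ *			reg = <0>;
+ *			label = "ls1x-nand";
+ *		};
+ *	};
+ */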
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 5eaa0be367cd..1003cf118c01 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -1863,7 +1863,12 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_
const struct nand_op_instr *instr = NULL;
unsigned int op_id = 0;
unsigned int len = 0;
- int ret;
+ int ret, reg_base;
+
+ reg_base = NAND_READ_LOCATION_0;
+
+ if (nandc->props->qpic_version2)
+ reg_base = NAND_READ_LOCATION_LAST_CW_0;
ret = qcom_parse_instructions(chip, subop, &q_op);
if (ret)
@@ -1915,14 +1920,17 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_
op_id = q_op.data_instr_idx;
len = nand_subop_get_data_len(subop, op_id);
- nandc_set_read_loc(chip, 0, 0, 0, len, 1);
+ if (nandc->props->qpic_version2)
+ nandc_set_read_loc_last(chip, reg_base, 0, len, 1);
+ else
+ nandc_set_read_loc_first(chip, reg_base, 0, len, 1);
if (!nandc->props->qpic_version2) {
qcom_write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
qcom_write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
}
- nandc->buf_count = len;
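+ /*
+ * Editorial note: the ONFI parameter page is fetched through a full
+ * 512-byte codeword transfer regardless of how many bytes the core
+ * asked for, so the bounce buffer must cover the whole codeword rather
+ * than just len.
+ */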
+ nandc->buf_count = 512;
memset(nandc->data_buffer, 0xff, nandc->buf_count);
config_nand_single_cw_page_read(chip, false, 0);
@@ -2360,6 +2368,7 @@ static const struct qcom_nandc_props ipq806x_nandc_props = {
.supports_bam = false,
.use_codeword_fixup = true,
.dev_cmd_reg_start = 0x0,
+ .bam_offset = 0x30000,
};
static const struct qcom_nandc_props ipq4019_nandc_props = {
@@ -2367,6 +2376,7 @@ static const struct qcom_nandc_props ipq4019_nandc_props = {
.supports_bam = true,
.nandc_part_of_qpic = true,
.dev_cmd_reg_start = 0x0,
+ .bam_offset = 0x30000,
};
static const struct qcom_nandc_props ipq8074_nandc_props = {
@@ -2374,6 +2384,7 @@ static const struct qcom_nandc_props ipq8074_nandc_props = {
.supports_bam = true,
.nandc_part_of_qpic = true,
.dev_cmd_reg_start = 0x7000,
+ .bam_offset = 0x30000,
};
static const struct qcom_nandc_props sdx55_nandc_props = {
@@ -2382,6 +2393,7 @@ static const struct qcom_nandc_props sdx55_nandc_props = {
.nandc_part_of_qpic = true,
.qpic_version2 = true,
.dev_cmd_reg_start = 0x7000,
+ .bam_offset = 0x30000,
};
/*
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index fab371e3e9b7..162cd5f4f234 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -817,6 +817,7 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct nand_chip *nand,
if (ret)
return ret;
+ sunxi_nfc_randomizer_config(nand, page, false);
sunxi_nfc_randomizer_enable(nand);
writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP,
nfc->regs + NFC_REG_CMD);
@@ -1049,6 +1050,7 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct nand_chip *nand,
if (ret)
return ret;
+ sunxi_nfc_randomizer_config(nand, page, false);
sunxi_nfc_randomizer_enable(nand);
sunxi_nfc_hw_ecc_set_prot_oob_bytes(nand, oob, 0, bbm, page);
diff --git a/drivers/mtd/nand/spi/alliancememory.c b/drivers/mtd/nand/spi/alliancememory.c
index 6046c73f8424..2ee498230ec1 100644
--- a/drivers/mtd/nand/spi/alliancememory.c
+++ b/drivers/mtd/nand/spi/alliancememory.c
@@ -17,20 +17,20 @@
#define AM_STATUS_ECC_MAX_CORRECTED (3 << 4)
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int am_get_eccsize(struct mtd_info *mtd)
{
diff --git a/drivers/mtd/nand/spi/ato.c b/drivers/mtd/nand/spi/ato.c
index bb5298911137..2b4df1d917ac 100644
--- a/drivers/mtd/nand/spi/ato.c
+++ b/drivers/mtd/nand/spi/ato.c
@@ -14,17 +14,17 @@
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int ato25d1ga_ooblayout_ecc(struct mtd_info *mtd, int section,
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index d16e42cf8fae..7099db7a62be 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -22,7 +22,7 @@
static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
- struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
+ struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(reg,
spinand->scratchbuf);
int ret;
@@ -36,7 +36,7 @@ static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
- struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
+ struct spi_mem_op op = SPINAND_SET_FEATURE_1S_1S_1S_OP(reg,
spinand->scratchbuf);
*spinand->scratchbuf = val;
@@ -362,7 +362,7 @@ static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status)
static int spinand_write_enable_op(struct spinand_device *spinand)
{
- struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);
+ struct spi_mem_op op = SPINAND_WR_EN_DIS_1S_0_0_OP(true);
return spi_mem_exec_op(spinand->spimem, &op);
}
@@ -372,7 +372,7 @@ static int spinand_load_page_op(struct spinand_device *spinand,
{
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int row = nanddev_pos_to_row(nand, &req->pos);
- struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);
+ struct spi_mem_op op = SPINAND_PAGE_READ_1S_1S_0_OP(row);
return spi_mem_exec_op(spinand->spimem, &op);
}
@@ -519,7 +519,7 @@ static int spinand_program_op(struct spinand_device *spinand,
{
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int row = nanddev_pos_to_row(nand, &req->pos);
- struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);
+ struct spi_mem_op op = SPINAND_PROG_EXEC_1S_1S_0_OP(row);
return spi_mem_exec_op(spinand->spimem, &op);
}
@@ -529,7 +529,7 @@ static int spinand_erase_op(struct spinand_device *spinand,
{
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int row = nanddev_pos_to_row(nand, pos);
- struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);
+ struct spi_mem_op op = SPINAND_BLK_ERASE_1S_1S_0_OP(row);
return spi_mem_exec_op(spinand->spimem, &op);
}
@@ -549,8 +549,8 @@ static int spinand_erase_op(struct spinand_device *spinand,
int spinand_wait(struct spinand_device *spinand, unsigned long initial_delay_us,
unsigned long poll_delay_us, u8 *s)
{
- struct spi_mem_op op = SPINAND_GET_FEATURE_OP(REG_STATUS,
- spinand->scratchbuf);
+ struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(REG_STATUS,
+ spinand->scratchbuf);
u8 status;
int ret;
@@ -583,7 +583,7 @@ out:
static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
u8 ndummy, u8 *buf)
{
- struct spi_mem_op op = SPINAND_READID_OP(
+ struct spi_mem_op op = SPINAND_READID_1S_1S_1S_OP(
naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
int ret;
@@ -596,7 +596,7 @@ static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
static int spinand_reset_op(struct spinand_device *spinand)
{
- struct spi_mem_op op = SPINAND_RESET_OP;
+ struct spi_mem_op op = SPINAND_RESET_1S_0_0_OP;
int ret;
ret = spi_mem_exec_op(spinand->spimem, &op);
diff --git a/drivers/mtd/nand/spi/esmt.c b/drivers/mtd/nand/spi/esmt.c
index a164d821464d..9e286612a296 100644
--- a/drivers/mtd/nand/spi/esmt.c
+++ b/drivers/mtd/nand/spi/esmt.c
@@ -18,18 +18,18 @@
(CFG_OTP_ENABLE | ESMT_F50L1G41LB_CFG_OTP_PROTECT)
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
/*
* OOB spare area map (64 bytes)
@@ -137,8 +137,8 @@ static int f50l1g41lb_user_otp_info(struct spinand_device *spinand, size_t len,
static int f50l1g41lb_otp_lock(struct spinand_device *spinand, loff_t from,
size_t len)
{
- struct spi_mem_op write_op = SPINAND_WR_EN_DIS_OP(true);
- struct spi_mem_op exec_op = SPINAND_PROG_EXEC_OP(0);
+ struct spi_mem_op write_op = SPINAND_WR_EN_DIS_1S_0_0_OP(true);
+ struct spi_mem_op exec_op = SPINAND_PROG_EXEC_1S_1S_0_OP(0);
u8 status;
int ret;
@@ -199,7 +199,7 @@ static const struct spinand_info esmt_c8_spinand_table[] = {
SPINAND_FACT_OTP_INFO(2, 0, &f50l1g41lb_fact_otp_ops)),
SPINAND_INFO("F50D1G41LB",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x11, 0x7f,
- 0x7f, 0x7f),
+ 0x7f),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(1, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
diff --git a/drivers/mtd/nand/spi/foresee.c b/drivers/mtd/nand/spi/foresee.c
index ecd5f6bffa33..7c61644bfb10 100644
--- a/drivers/mtd/nand/spi/foresee.c
+++ b/drivers/mtd/nand/spi/foresee.c
@@ -12,18 +12,18 @@
#define SPINAND_MFR_FORESEE 0xCD
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int f35sqa002g_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
index d620bb02a20a..cb1d316fc4d8 100644
--- a/drivers/mtd/nand/spi/gigadevice.c
+++ b/drivers/mtd/nand/spi/gigadevice.c
@@ -24,44 +24,44 @@
#define GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR (7 << 4)
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(read_cache_variants_f,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP_3A(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP_3A(0, 0, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_3A_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_1S_OP(0, 0, NULL, 0));
static SPINAND_OP_VARIANTS(read_cache_variants_1gq5,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(read_cache_variants_2gq5,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 4, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 4, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int gd5fxgq4xa_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
@@ -185,7 +185,7 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
u8 status2;
- struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
+ struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(GD5FXGQXXEXXG_REG_STATUS2,
spinand->scratchbuf);
int ret;
@@ -228,7 +228,7 @@ static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
u8 status2;
- struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
+ struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(GD5FXGQXXEXXG_REG_STATUS2,
spinand->scratchbuf);
int ret;
diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c
index 1ef08ad850a2..eeaf5bf9f082 100644
--- a/drivers/mtd/nand/spi/macronix.c
+++ b/drivers/mtd/nand/spi/macronix.c
@@ -28,18 +28,18 @@ struct macronix_priv {
};
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int mx35lfxge4ab_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
@@ -148,8 +148,8 @@ static int macronix_set_cont_read(struct spinand_device *spinand, bool enable)
static int macronix_set_read_retry(struct spinand_device *spinand,
unsigned int retry_mode)
{
- struct spi_mem_op op = SPINAND_SET_FEATURE_OP(MACRONIX_FEATURE_ADDR_READ_RETRY,
- spinand->scratchbuf);
+ struct spi_mem_op op = SPINAND_SET_FEATURE_1S_1S_1S_OP(MACRONIX_FEATURE_ADDR_READ_RETRY,
+ spinand->scratchbuf);
*spinand->scratchbuf = retry_mode;
return spi_mem_exec_op(spinand->spimem, &op);
diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c
index 691f8a2e0791..8281c9d3f4f7 100644
--- a/drivers/mtd/nand/spi/micron.c
+++ b/drivers/mtd/nand/spi/micron.c
@@ -35,33 +35,33 @@
(CFG_OTP_ENABLE | MICRON_MT29F2G01ABAGD_CFG_OTP_STATE)
static SPINAND_OP_VARIANTS(quadio_read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(x4_write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(x4_update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
/* Micron MT29F2G01AAAED Device */
static SPINAND_OP_VARIANTS(x4_read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(x1_write_cache_variants,
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(x1_update_cache_variants,
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int micron_8_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
@@ -137,7 +137,7 @@ static const struct mtd_ooblayout_ops micron_4_ooblayout = {
static int micron_select_target(struct spinand_device *spinand,
unsigned int target)
{
- struct spi_mem_op op = SPINAND_SET_FEATURE_OP(MICRON_DIE_SELECT_REG,
+ struct spi_mem_op op = SPINAND_SET_FEATURE_1S_1S_1S_OP(MICRON_DIE_SELECT_REG,
spinand->scratchbuf);
if (target > 1)
@@ -251,8 +251,8 @@ static int mt29f2g01abagd_user_otp_info(struct spinand_device *spinand,
static int mt29f2g01abagd_otp_lock(struct spinand_device *spinand, loff_t from,
size_t len)
{
- struct spi_mem_op write_op = SPINAND_WR_EN_DIS_OP(true);
- struct spi_mem_op exec_op = SPINAND_PROG_EXEC_OP(0);
+ struct spi_mem_op write_op = SPINAND_WR_EN_DIS_1S_0_0_OP(true);
+ struct spi_mem_op exec_op = SPINAND_PROG_EXEC_1S_1S_0_OP(0);
u8 status;
int ret;
diff --git a/drivers/mtd/nand/spi/paragon.c b/drivers/mtd/nand/spi/paragon.c
index 6e7cc6995380..4670bac41245 100644
--- a/drivers/mtd/nand/spi/paragon.c
+++ b/drivers/mtd/nand/spi/paragon.c
@@ -22,20 +22,20 @@
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int pn26g0xa_ooblayout_ecc(struct mtd_info *mtd, int section,
diff --git a/drivers/mtd/nand/spi/skyhigh.c b/drivers/mtd/nand/spi/skyhigh.c
index 961df0d74984..51d61785df61 100644
--- a/drivers/mtd/nand/spi/skyhigh.c
+++ b/drivers/mtd/nand/spi/skyhigh.c
@@ -17,20 +17,20 @@
#define SKYHIGH_CONFIG_PROTECT_EN BIT(1)
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 4, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 4, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int skyhigh_spinand_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c
index 2e2106b2705f..4c6923047aeb 100644
--- a/drivers/mtd/nand/spi/toshiba.c
+++ b/drivers/mtd/nand/spi/toshiba.c
@@ -15,28 +15,28 @@
#define TOSH_STATUS_ECC_HAS_BITFLIPS_T (3 << 4)
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_x4_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_x4_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
/*
* Backward compatibility for 1st generation Serial NAND devices
* which don't support Quad Program Load operation.
*/
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int tx58cxgxsxraix_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
@@ -73,7 +73,7 @@ static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand,
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 mbf = 0;
- struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, spinand->scratchbuf);
+ struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(0x30, spinand->scratchbuf);
switch (status & STATUS_ECC_MASK) {
case STATUS_ECC_NO_BITFLIPS:
diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c
index 8394a1b1fb0c..19f8dd4a6370 100644
--- a/drivers/mtd/nand/spi/winbond.c
+++ b/drivers/mtd/nand/spi/winbond.c
@@ -23,34 +23,50 @@
* "X4" in the core is equivalent to "quad output" in the datasheets.
*/
-static SPINAND_OP_VARIANTS(read_cache_dtr_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_DTR_OP(0, 8, NULL, 0, 80 * HZ_PER_MHZ),
- SPINAND_PAGE_READ_FROM_CACHE_X4_DTR_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_DTR_OP(0, 4, NULL, 0, 80 * HZ_PER_MHZ),
- SPINAND_PAGE_READ_FROM_CACHE_X2_DTR_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DTR_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0, 54 * HZ_PER_MHZ));
+static SPINAND_OP_VARIANTS(read_cache_octal_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1D_8D_OP(0, 2, NULL, 0, 105 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_8S_8S_OP(0, 16, NULL, 0, 86 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_8S_OP(0, 1, NULL, 0, 133 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_octal_variants,
+ SPINAND_PROG_LOAD_1S_8S_8S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_8S_OP(0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_octal_variants,
+ SPINAND_PROG_LOAD_1S_8S_8S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(read_cache_dual_quad_dtr_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4D_4D_OP(0, 8, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1D_4D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2D_2D_OP(0, 4, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1D_2D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1D_1D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 54 * HZ_PER_MHZ));
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int w25m02gv_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
@@ -141,12 +157,47 @@ static const struct mtd_ooblayout_ops w25n02kv_ooblayout = {
.free = w25n02kv_ooblayout_free,
};
+static int w35n01jw_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 7)
+ return -ERANGE;
+
+ region->offset = (16 * section) + 12;
+ region->length = 4;
+
+ return 0;
+}
+
+static int w35n01jw_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 7)
+ return -ERANGE;
+
+ region->offset = 16 * section;
+ region->length = 12;
+
+ /* Extract BBM */
+ if (!section) {
+ region->offset += 2;
+ region->length -= 2;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops w35n01jw_ooblayout = {
+ .ecc = w35n01jw_ooblayout_ecc,
+ .free = w35n01jw_ooblayout_free,
+};
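+
+/*
+ * Worked example (editorial): the W35N01JW has a 128-byte OOB split into
+ * eight 16-byte sections, one per 512-byte ECC step. Section 0 yields
+ * free bytes 2..11 (bytes 0..1 hold the bad-block marker) and ECC bytes
+ * 12..15; section 1 yields free bytes 16..27 and ECC bytes 28..31, and
+ * so on.
+ */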
+
static int w25n02kv_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 mbf = 0;
- struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, spinand->scratchbuf);
+ struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(0x30, spinand->scratchbuf);
switch (status & STATUS_ECC_MASK) {
case STATUS_ECC_NO_BITFLIPS:
@@ -213,7 +264,7 @@ static const struct spinand_info winbond_spinand_table[] = {
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xbc, 0x21),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(1, 512),
- SPINAND_INFO_OP_VARIANTS(&read_cache_dtr_variants,
+ SPINAND_INFO_OP_VARIANTS(&read_cache_dual_quad_dtr_variants,
&write_cache_variants,
&update_cache_variants),
0,
@@ -227,6 +278,33 @@ static const struct spinand_info winbond_spinand_table[] = {
&update_cache_variants),
0,
SPINAND_ECCINFO(&w25n01kv_ooblayout, w25n02kv_ecc_get_status)),
+ SPINAND_INFO("W35N01JW", /* 1.8V */
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xdc, 0x21),
+ NAND_MEMORG(1, 4096, 128, 64, 512, 10, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_octal_variants,
+ &write_cache_octal_variants,
+ &update_cache_octal_variants),
+ 0,
+ SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL)),
+ SPINAND_INFO("W35N02JW", /* 1.8V */
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xdf, 0x22),
+ NAND_MEMORG(1, 4096, 128, 64, 512, 10, 2, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_octal_variants,
+ &write_cache_octal_variants,
+ &update_cache_octal_variants),
+ 0,
+ SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL)),
+ SPINAND_INFO("W35N04JW", /* 1.8V */
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xdf, 0x23),
+ NAND_MEMORG(1, 4096, 128, 64, 512, 10, 4, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_octal_variants,
+ &write_cache_octal_variants,
+ &update_cache_octal_variants),
+ 0,
+ SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL)),
/* 2G-bit densities */
SPINAND_INFO("W25M02GV", /* 2x1G-bit 3.3V */
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xab, 0x21),
@@ -242,7 +320,7 @@ static const struct spinand_info winbond_spinand_table[] = {
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xbf, 0x22),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 2, 1),
NAND_ECCREQ(1, 512),
- SPINAND_INFO_OP_VARIANTS(&read_cache_dtr_variants,
+ SPINAND_INFO_OP_VARIANTS(&read_cache_dual_quad_dtr_variants,
&write_cache_variants,
&update_cache_variants),
0,
diff --git a/drivers/mtd/nand/spi/xtx.c b/drivers/mtd/nand/spi/xtx.c
index 3f539ca0de86..37336d5958a9 100644
--- a/drivers/mtd/nand/spi/xtx.c
+++ b/drivers/mtd/nand/spi/xtx.c
@@ -23,20 +23,20 @@
#define XT26XXXD_STATUS_ECC_UNCOR_ERROR (2)
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int xt26g0xa_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c
index 55644a3cd88c..e97f5cbd9aad 100644
--- a/drivers/mtd/spi-nor/macronix.c
+++ b/drivers/mtd/spi-nor/macronix.c
@@ -58,6 +58,31 @@ macronix_qpp4b_post_sfdp_fixups(struct spi_nor *nor)
return 0;
}
+static int
+mx25l3255e_late_init_fixups(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+
+ /*
+ * The SFDP of the MX25L3255E is JESD216, which does not include the
+ * Quad Enable bit requirement in the BFPT. As a result, BFPT parsing
+ * does not set the quad_enable method to spi_nor_sr1_bit6_quad_enable,
+ * so it must be corrected here in late_init.
+ */
+ params->quad_enable = spi_nor_sr1_bit6_quad_enable;
+
+ /*
+ * The MX25L3255E also supports 1-4-4 page program in 3-byte address
+ * mode. Since the 3-byte address 1-4-4 page program is not described in
+ * SFDP either, it has to be configured in late_init as well.
+ */
+ params->hwcaps.mask |= SNOR_HWCAPS_PP_1_4_4;
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_1_4_4],
+ SPINOR_OP_PP_1_4_4, SNOR_PROTO_1_4_4);
+
+ return 0;
+}
+
static const struct spi_nor_fixups mx25l25635_fixups = {
.post_bfpt = mx25l25635_post_bfpt_fixups,
.post_sfdp = macronix_qpp4b_post_sfdp_fixups,
@@ -67,6 +92,10 @@ static const struct spi_nor_fixups macronix_qpp4b_fixups = {
.post_sfdp = macronix_qpp4b_post_sfdp_fixups,
};
+static const struct spi_nor_fixups mx25l3255e_fixups = {
+ .late_init = mx25l3255e_late_init_fixups,
+};
+
static const struct flash_info macronix_nor_parts[] = {
{
.id = SNOR_ID(0xc2, 0x20, 0x10),
@@ -88,10 +117,8 @@ static const struct flash_info macronix_nor_parts[] = {
.name = "mx25l8005",
.size = SZ_1M,
}, {
+ /* MX25L1606E */
.id = SNOR_ID(0xc2, 0x20, 0x15),
- .name = "mx25l1606e",
- .size = SZ_2M,
- .no_sfdp_flags = SECT_4K,
}, {
.id = SNOR_ID(0xc2, 0x20, 0x16),
.name = "mx25l3205d",
@@ -103,29 +130,21 @@ static const struct flash_info macronix_nor_parts[] = {
.size = SZ_8M,
.no_sfdp_flags = SECT_4K,
}, {
+ /* MX25L12805D */
.id = SNOR_ID(0xc2, 0x20, 0x18),
- .name = "mx25l12805d",
- .size = SZ_16M,
.flags = SPI_NOR_HAS_LOCK | SPI_NOR_4BIT_BP,
- .no_sfdp_flags = SECT_4K,
}, {
+ /* MX25L25635E, MX25L25645G */
.id = SNOR_ID(0xc2, 0x20, 0x19),
- .name = "mx25l25635e",
- .size = SZ_32M,
- .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
.fixups = &mx25l25635_fixups
}, {
+ /* MX66L51235F */
.id = SNOR_ID(0xc2, 0x20, 0x1a),
- .name = "mx66l51235f",
- .size = SZ_64M,
- .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
.fixup_flags = SPI_NOR_4B_OPCODES,
.fixups = &macronix_qpp4b_fixups,
}, {
+ /* MX66L1G45G */
.id = SNOR_ID(0xc2, 0x20, 0x1b),
- .name = "mx66l1g45g",
- .size = SZ_128M,
- .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
.fixups = &macronix_qpp4b_fixups,
}, {
/* MX66L2G45G */
@@ -167,29 +186,16 @@ static const struct flash_info macronix_nor_parts[] = {
.size = SZ_16M,
.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
}, {
+ /* MX25U51245G */
.id = SNOR_ID(0xc2, 0x25, 0x3a),
- .name = "mx25u51245g",
- .size = SZ_64M,
- .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
- .fixup_flags = SPI_NOR_4B_OPCODES,
- .fixups = &macronix_qpp4b_fixups,
- }, {
- .id = SNOR_ID(0xc2, 0x25, 0x3a),
- .name = "mx66u51235f",
- .size = SZ_64M,
- .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
- .fixup_flags = SPI_NOR_4B_OPCODES,
.fixups = &macronix_qpp4b_fixups,
}, {
/* MX66U1G45G */
.id = SNOR_ID(0xc2, 0x25, 0x3b),
.fixups = &macronix_qpp4b_fixups,
}, {
+ /* MX66U2G45G */
.id = SNOR_ID(0xc2, 0x25, 0x3c),
- .name = "mx66u2g45g",
- .size = SZ_256M,
- .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
- .fixup_flags = SPI_NOR_4B_OPCODES,
.fixups = &macronix_qpp4b_fixups,
}, {
.id = SNOR_ID(0xc2, 0x26, 0x18),
@@ -215,15 +221,14 @@ static const struct flash_info macronix_nor_parts[] = {
.size = SZ_4M,
.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
}, {
+ /* MX25UW51245G */
.id = SNOR_ID(0xc2, 0x81, 0x3a),
- .name = "mx25uw51245g",
.n_banks = 4,
.flags = SPI_NOR_RWW,
}, {
+ /* MX25L3255E */
.id = SNOR_ID(0xc2, 0x9e, 0x16),
- .name = "mx25l3255e",
- .size = SZ_4M,
- .no_sfdp_flags = SECT_4K,
+ .fixups = &mx25l3255e_fixups,
},
/*
* This spares us from adding new flash entries for flashes that can be
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 4ffaf7588885..3504507477c6 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -391,6 +391,7 @@ static void mana_gd_process_eqe(struct gdma_queue *eq)
case GDMA_EQE_HWC_INIT_EQ_ID_DB:
case GDMA_EQE_HWC_INIT_DATA:
case GDMA_EQE_HWC_INIT_DONE:
+ case GDMA_EQE_HWC_SOC_SERVICE:
case GDMA_EQE_RNIC_QP_FATAL:
if (!eq->eq.callback)
break;
@@ -964,6 +965,7 @@ int mana_gd_verify_vf_version(struct pci_dev *pdev)
err, resp.hdr.status);
return err ? err : -EPROTO;
}
+ gc->pf_cap_flags1 = resp.pf_cap_flags1;
if (resp.pf_cap_flags1 & GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG) {
err = mana_gd_query_hwc_timeout(pdev, &hwc->hwc_timeout);
if (err) {
@@ -1004,7 +1006,6 @@ int mana_gd_register_device(struct gdma_dev *gd)
return 0;
}
-EXPORT_SYMBOL_NS(mana_gd_register_device, "NET_MANA");
int mana_gd_deregister_device(struct gdma_dev *gd)
{
@@ -1035,7 +1036,6 @@ int mana_gd_deregister_device(struct gdma_dev *gd)
return err;
}
-EXPORT_SYMBOL_NS(mana_gd_deregister_device, "NET_MANA");
u32 mana_gd_wq_avail_space(struct gdma_queue *wq)
{
@@ -1469,10 +1469,14 @@ static int mana_gd_setup(struct pci_dev *pdev)
mana_gd_init_registers(pdev);
mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base);
+ gc->service_wq = alloc_ordered_workqueue("gdma_service_wq", 0);
+ if (!gc->service_wq)
+ return -ENOMEM;
+
err = mana_gd_setup_irqs(pdev);
if (err) {
dev_err(gc->dev, "Failed to setup IRQs: %d\n", err);
- return err;
+ goto free_workqueue;
}
err = mana_hwc_create_channel(gc);
@@ -1498,6 +1502,8 @@ destroy_hwc:
mana_hwc_destroy_channel(gc);
remove_irq:
mana_gd_remove_irqs(pdev);
+free_workqueue:
+ destroy_workqueue(gc->service_wq);
dev_err(&pdev->dev, "%s failed (error %d)\n", __func__, err);
return err;
}
@@ -1509,6 +1515,8 @@ static void mana_gd_cleanup(struct pci_dev *pdev)
mana_hwc_destroy_channel(gc);
mana_gd_remove_irqs(pdev);
+
+ destroy_workqueue(gc->service_wq);
dev_dbg(&pdev->dev, "mana gdma cleanup successful\n");
}
@@ -1578,8 +1586,14 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto cleanup_gd;
+ err = mana_rdma_probe(&gc->mana_ib);
+ if (err)
+ goto cleanup_mana;
+
return 0;
+cleanup_mana:
+ mana_remove(&gc->mana, false);
cleanup_gd:
mana_gd_cleanup(pdev);
unmap_bar:
@@ -1607,6 +1621,7 @@ static void mana_gd_remove(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
+ mana_rdma_remove(&gc->mana_ib);
mana_remove(&gc->mana, false);
mana_gd_cleanup(pdev);
@@ -1630,6 +1645,7 @@ static int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
+ mana_rdma_remove(&gc->mana_ib);
mana_remove(&gc->mana, true);
mana_gd_cleanup(pdev);
@@ -1654,6 +1670,10 @@ static int mana_gd_resume(struct pci_dev *pdev)
if (err)
return err;
+ err = mana_rdma_probe(&gc->mana_ib);
+ if (err)
+ return err;
+
return 0;
}
@@ -1664,6 +1684,7 @@ static void mana_gd_shutdown(struct pci_dev *pdev)
dev_info(&pdev->dev, "Shutdown was called\n");
+ mana_rdma_remove(&gc->mana_ib);
mana_remove(&gc->mana, true);
mana_gd_cleanup(pdev);
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index 1ba49602089b..a8c4d8db75a5 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -112,11 +112,13 @@ out:
static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
struct gdma_event *event)
{
+ union hwc_init_soc_service_type service_data;
struct hw_channel_context *hwc = ctx;
struct gdma_dev *gd = hwc->gdma_dev;
union hwc_init_type_data type_data;
union hwc_init_eq_id_db eq_db;
u32 type, val;
+ int ret;
switch (event->type) {
case GDMA_EQE_HWC_INIT_EQ_ID_DB:
@@ -199,7 +201,24 @@ static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
}
break;
+ case GDMA_EQE_HWC_SOC_SERVICE:
+ service_data.as_uint32 = event->details[0];
+ type = service_data.type;
+ switch (type) {
+ case GDMA_SERVICE_TYPE_RDMA_SUSPEND:
+ case GDMA_SERVICE_TYPE_RDMA_RESUME:
+ ret = mana_rdma_service_event(gd->gdma_context, type);
+ if (ret)
+ dev_err(hwc->dev, "Failed to schedule adev service event: %d\n",
+ ret);
+ break;
+ default:
+ dev_warn(hwc->dev, "Received unknown SOC service type %u\n", type);
+ break;
+ }
+
+ break;
default:
dev_warn(hwc->dev, "Received unknown gdma event %u\n", event->type);
/* Ignore unknown events, which should never happen. */
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 9c58d9e0bbb5..ccd2885c939e 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -2950,7 +2950,7 @@ static void remove_adev(struct gdma_dev *gd)
gd->adev = NULL;
}
-static int add_adev(struct gdma_dev *gd)
+static int add_adev(struct gdma_dev *gd, const char *name)
{
struct auxiliary_device *adev;
struct mana_adev *madev;
@@ -2966,7 +2966,7 @@ static int add_adev(struct gdma_dev *gd)
goto idx_fail;
adev->id = ret;
- adev->name = "rdma";
+ adev->name = name;
adev->dev.parent = gd->gdma_context->dev;
adev->dev.release = adev_release;
madev->mdev = gd;
@@ -2998,6 +2998,70 @@ idx_fail:
return ret;
}
+static void mana_rdma_service_handle(struct work_struct *work)
+{
+ struct mana_service_work *serv_work =
+ container_of(work, struct mana_service_work, work);
+ struct gdma_dev *gd = serv_work->gdma_dev;
+ struct device *dev = gd->gdma_context->dev;
+ int ret;
+
+ if (READ_ONCE(gd->rdma_teardown))
+ goto out;
+
+ switch (serv_work->event) {
+ case GDMA_SERVICE_TYPE_RDMA_SUSPEND:
+ if (!gd->adev || gd->is_suspended)
+ break;
+
+ remove_adev(gd);
+ gd->is_suspended = true;
+ break;
+
+ case GDMA_SERVICE_TYPE_RDMA_RESUME:
+ if (!gd->is_suspended)
+ break;
+
+ ret = add_adev(gd, "rdma");
+ if (ret)
+ dev_err(dev, "Failed to add adev on resume: %d\n", ret);
+ else
+ gd->is_suspended = false;
+ break;
+
+ default:
+ dev_warn(dev, "unknown adev service event %u\n",
+ serv_work->event);
+ break;
+ }
+
+out:
+ kfree(serv_work);
+}
+
+int mana_rdma_service_event(struct gdma_context *gc, enum gdma_service_type event)
+{
+ struct gdma_dev *gd = &gc->mana_ib;
+ struct mana_service_work *serv_work;
+
+ if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) {
+ /* RDMA device is not detected on pci */
+ return 0;
+ }
+
+ serv_work = kzalloc(sizeof(*serv_work), GFP_ATOMIC);
+ if (!serv_work)
+ return -ENOMEM;
+
+ serv_work->event = event;
+ serv_work->gdma_dev = gd;
+
+ INIT_WORK(&serv_work->work, mana_rdma_service_handle);
+ queue_work(gc->service_wq, &serv_work->work);
+
+ return 0;
+}
+
int mana_probe(struct gdma_dev *gd, bool resuming)
{
struct gdma_context *gc = gd->gdma_context;
@@ -3085,7 +3149,7 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
}
}
- err = add_adev(gd);
+ err = add_adev(gd, "eth");
out:
if (err) {
mana_remove(gd, false);
@@ -3159,6 +3223,44 @@ out:
dev_dbg(dev, "%s succeeded\n", __func__);
}
+int mana_rdma_probe(struct gdma_dev *gd)
+{
+ int err = 0;
+
+ if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) {
+ /* RDMA device is not detected on pci */
+ return err;
+ }
+
+ err = mana_gd_register_device(gd);
+ if (err)
+ return err;
+
+ err = add_adev(gd, "rdma");
+ if (err)
+ mana_gd_deregister_device(gd);
+
+ return err;
+}
+
+void mana_rdma_remove(struct gdma_dev *gd)
+{
+ struct gdma_context *gc = gd->gdma_context;
+
+ if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) {
+ /* RDMA device is not detected on pci */
+ return;
+ }
+
+ WRITE_ONCE(gd->rdma_teardown, true);
+ flush_workqueue(gc->service_wq);
+
+ if (gd->adev)
+ remove_adev(gd);
+
+ mana_gd_deregister_device(gd);
+}
+
struct net_device *mana_get_primary_netdev(struct mana_context *ac,
u32 port_index,
netdevice_tracker *tracker)
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/d3.c b/drivers/net/wireless/intel/iwlwifi/mld/d3.c
index 339b148d6793..c776543cbba5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/d3.c
@@ -1757,7 +1757,7 @@ iwl_mld_send_proto_offload(struct iwl_mld *mld,
addrconf_addr_solict_mult(&wowlan_data->target_ipv6_addrs[i],
&solicited_addr);
- for (j = 0; j < c; j++)
+ for (j = 0; j < n_nsc && j < c; j++)
if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr,
&solicited_addr) == 0)
break;
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index 4c253b433bf7..4904097dfd49 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -3,7 +3,7 @@
config NVME_TARGET
tristate "NVMe Target support"
depends on BLOCK
- depends on CONFIGFS_FS
+ select CONFIGFS_FS
select NVME_KEYRING if NVME_TARGET_TCP_TLS
select KEYS if NVME_TARGET_TCP_TLS
select SGL_ALLOC
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index aedd0e2dcd89..0edd639898a6 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -25,6 +25,7 @@
#include <linux/serial_core.h>
#include <linux/sysfs.h>
#include <linux/random.h>
+#include <linux/kexec_handover.h>
#include <asm/setup.h> /* for COMMAND_LINE_SIZE */
#include <asm/page.h>
@@ -875,6 +876,36 @@ void __init early_init_dt_check_for_usable_mem_range(void)
memblock_add(rgn[i].base, rgn[i].size);
}
+/**
+ * early_init_dt_check_kho - Decode info required for kexec handover from DT
+ */
+static void __init early_init_dt_check_kho(void)
+{
+ unsigned long node = chosen_node_offset;
+ u64 fdt_start, fdt_size, scratch_start, scratch_size;
+ const __be32 *p;
+ int l;
+
+ if (!IS_ENABLED(CONFIG_KEXEC_HANDOVER) || (long)node < 0)
+ return;
+
+ p = of_get_flat_dt_prop(node, "linux,kho-fdt", &l);
+ if (l != (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32))
+ return;
+
+ fdt_start = dt_mem_next_cell(dt_root_addr_cells, &p);
+ fdt_size = dt_mem_next_cell(dt_root_addr_cells, &p);
+
+ p = of_get_flat_dt_prop(node, "linux,kho-scratch", &l);
+ if (l != (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32))
+ return;
+
+ scratch_start = dt_mem_next_cell(dt_root_addr_cells, &p);
+ scratch_size = dt_mem_next_cell(dt_root_addr_cells, &p);
+
+ kho_populate(fdt_start, fdt_size, scratch_start, scratch_size);
+}
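For scale: with the common dt_root_addr_cells == dt_root_size_cells == 2, each property must be exactly (2 + 2) * sizeof(__be32) == 16 bytes, i.e. one 64-bit address followed by one 64-bit size. A hypothetical cell encoding of a 4 KiB handover FDT at 0x88000000 (the address is invented for illustration; the 4 KiB length matches the PAGE_SIZE written by the producer side in drivers/of/kexec.c below):

static const fdt32_t kho_fdt_prop[4] = {
	cpu_to_fdt32(0x0), cpu_to_fdt32(0x88000000),	/* address: 2 cells */
	cpu_to_fdt32(0x0), cpu_to_fdt32(0x1000),	/* size: 2 cells */
};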
+
#ifdef CONFIG_SERIAL_EARLYCON
int __init early_init_dt_scan_chosen_stdout(void)
@@ -1169,6 +1200,9 @@ void __init early_init_dt_scan_nodes(void)
/* Handle linux,usable-memory-range property */
early_init_dt_check_for_usable_mem_range();
+
+ /* Handle kexec handover */
+ early_init_dt_check_kho();
}
bool __init early_init_dt_scan(void *dt_virt, phys_addr_t dt_phys)
diff --git a/drivers/of/kexec.c b/drivers/of/kexec.c
index 5b924597a4de..1ee2d31816ae 100644
--- a/drivers/of/kexec.c
+++ b/drivers/of/kexec.c
@@ -264,6 +264,43 @@ static inline int setup_ima_buffer(const struct kimage *image, void *fdt,
}
#endif /* CONFIG_IMA_KEXEC */
+static int kho_add_chosen(const struct kimage *image, void *fdt, int chosen_node)
+{
+ int ret = 0;
+#ifdef CONFIG_KEXEC_HANDOVER
+ phys_addr_t fdt_mem = 0;
+ phys_addr_t fdt_len = 0;
+ phys_addr_t scratch_mem = 0;
+ phys_addr_t scratch_len = 0;
+
+ ret = fdt_delprop(fdt, chosen_node, "linux,kho-fdt");
+ if (ret && ret != -FDT_ERR_NOTFOUND)
+ return ret;
+ ret = fdt_delprop(fdt, chosen_node, "linux,kho-scratch");
+ if (ret && ret != -FDT_ERR_NOTFOUND)
+ return ret;
+
+ if (!image->kho.fdt || !image->kho.scratch)
+ return 0;
+
+ fdt_mem = image->kho.fdt;
+ fdt_len = PAGE_SIZE;
+ scratch_mem = image->kho.scratch->mem;
+ scratch_len = image->kho.scratch->bufsz;
+
+ pr_debug("Adding kho metadata to DT");
+
+ ret = fdt_appendprop_addrrange(fdt, 0, chosen_node, "linux,kho-fdt",
+ fdt_mem, fdt_len);
+ if (ret)
+ return ret;
+ ret = fdt_appendprop_addrrange(fdt, 0, chosen_node, "linux,kho-scratch",
+ scratch_mem, scratch_len);
+
+#endif /* CONFIG_KEXEC_HANDOVER */
+ return ret;
+}
+
/*
* of_kexec_alloc_and_setup_fdt - Alloc and setup a new Flattened Device Tree
*
@@ -414,6 +451,11 @@ void *of_kexec_alloc_and_setup_fdt(const struct kimage *image,
#endif
}
+ /* Add kho metadata if this is a KHO image */
+ ret = kho_add_chosen(image, fdt, chosen_node);
+ if (ret)
+ goto out;
+
/* add bootargs */
if (cmdline) {
ret = fdt_setprop_string(fdt, chosen_node, "bootargs", cmdline);
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index e1eaa24559a2..ef5d655a0052 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -50,6 +50,7 @@
#include <linux/irqdomain.h>
#include <linux/acpi.h>
#include <linux/sizes.h>
+#include <linux/of_irq.h>
#include <asm/mshyperv.h>
/*
@@ -309,8 +310,6 @@ struct pci_packet {
void (*completion_func)(void *context, struct pci_response *resp,
int resp_packet_size);
void *compl_ctxt;
-
- struct pci_message message[];
};
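With the flexible array member gone, the message body has to live in an explicit buffer placed directly after the packet header; the hunks below switch every access over to that layout. A sketch of the on-stack shape the callers are assumed to use:

struct {
	struct pci_packet pkt;
	u8 buf[sizeof(struct pci_read_block)];
} pkt;

read_blk = (struct pci_read_block *)pkt.buf;	/* same address as (&pkt.pkt + 1) */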
/*
@@ -817,9 +816,17 @@ static int hv_pci_vec_irq_gic_domain_alloc(struct irq_domain *domain,
int ret;
fwspec.fwnode = domain->parent->fwnode;
- fwspec.param_count = 2;
- fwspec.param[0] = hwirq;
- fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+ if (is_of_node(fwspec.fwnode)) {
+ /* SPI lines for OF translations start at offset 32 */
+ fwspec.param_count = 3;
+ fwspec.param[0] = 0;
+ fwspec.param[1] = hwirq - 32;
+ fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
+ } else {
+ fwspec.param_count = 2;
+ fwspec.param[0] = hwirq;
+ fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+ }
ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
if (ret)
@@ -887,10 +894,44 @@ static const struct irq_domain_ops hv_pci_domain_ops = {
.activate = hv_pci_vec_irq_domain_activate,
};
+#ifdef CONFIG_OF
+
+static struct irq_domain *hv_pci_of_irq_domain_parent(void)
+{
+ struct device_node *parent;
+ struct irq_domain *domain;
+
+ parent = of_irq_find_parent(hv_get_vmbus_root_device()->of_node);
+ if (!parent)
+ return NULL;
+ domain = irq_find_host(parent);
+ of_node_put(parent);
+
+ return domain;
+}
+
+#endif
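In the OF branch of the fwspec translation above, the three cells follow the GIC devicetree binding: cell 0 selects the interrupt class (0 for a shared peripheral interrupt), cell 1 is the SPI number relative to the SPI base at hwirq 32, and cell 2 carries the trigger type. As a worked mapping for an absolute GIC hwirq of 40 (a value chosen purely for illustration):

fwspec.param_count = 3;
fwspec.param[0] = 0;			/* GIC_SPI */
fwspec.param[1] = 40 - 32;		/* SPI number 8 */
fwspec.param[2] = IRQ_TYPE_EDGE_RISING;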
+
+#ifdef CONFIG_ACPI
+
+static struct irq_domain *hv_pci_acpi_irq_domain_parent(void)
+{
+ acpi_gsi_domain_disp_fn gsi_domain_disp_fn;
+
+ gsi_domain_disp_fn = acpi_get_gsi_dispatcher();
+ if (!gsi_domain_disp_fn)
+ return NULL;
+ return irq_find_matching_fwnode(gsi_domain_disp_fn(0),
+ DOMAIN_BUS_ANY);
+}
+
+#endif
+
static int hv_pci_irqchip_init(void)
{
static struct hv_pci_chip_data *chip_data;
struct fwnode_handle *fn = NULL;
+ struct irq_domain *irq_domain_parent = NULL;
int ret = -ENOMEM;
chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL);
@@ -907,9 +948,24 @@ static int hv_pci_irqchip_init(void)
* way to ensure that all the corresponding devices are also gone and
* no interrupts will be generated.
*/
- hv_msi_gic_irq_domain = acpi_irq_create_hierarchy(0, HV_PCI_MSI_SPI_NR,
- fn, &hv_pci_domain_ops,
- chip_data);
+#ifdef CONFIG_ACPI
+ if (!acpi_disabled)
+ irq_domain_parent = hv_pci_acpi_irq_domain_parent();
+#endif
+#ifdef CONFIG_OF
+ if (!irq_domain_parent)
+ irq_domain_parent = hv_pci_of_irq_domain_parent();
+#endif
+ if (!irq_domain_parent) {
+ WARN_ONCE(1, "Invalid firmware configuration for VMBus interrupts\n");
+ ret = -EINVAL;
+ goto free_chip;
+ }
+
+ hv_msi_gic_irq_domain = irq_domain_create_hierarchy(irq_domain_parent, 0,
+ HV_PCI_MSI_SPI_NR,
+ fn, &hv_pci_domain_ops,
+ chip_data);
if (!hv_msi_gic_irq_domain) {
pr_err("Failed to create Hyper-V arm64 vPCI MSI IRQ domain\n");
@@ -1438,7 +1494,7 @@ static int hv_read_config_block(struct pci_dev *pdev, void *buf,
memset(&pkt, 0, sizeof(pkt));
pkt.pkt.completion_func = hv_pci_read_config_compl;
pkt.pkt.compl_ctxt = &comp_pkt;
- read_blk = (struct pci_read_block *)&pkt.pkt.message;
+ read_blk = (struct pci_read_block *)pkt.buf;
read_blk->message_type.type = PCI_READ_BLOCK;
read_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
read_blk->block_id = block_id;
@@ -1518,7 +1574,7 @@ static int hv_write_config_block(struct pci_dev *pdev, void *buf,
memset(&pkt, 0, sizeof(pkt));
pkt.pkt.completion_func = hv_pci_write_config_compl;
pkt.pkt.compl_ctxt = &comp_pkt;
- write_blk = (struct pci_write_block *)&pkt.pkt.message;
+ write_blk = (struct pci_write_block *)pkt.buf;
write_blk->message_type.type = PCI_WRITE_BLOCK;
write_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
write_blk->block_id = block_id;
@@ -1599,7 +1655,7 @@ static void hv_int_desc_free(struct hv_pci_dev *hpdev,
return;
}
memset(&ctxt, 0, sizeof(ctxt));
- int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message;
+ int_pkt = (struct pci_delete_interrupt *)ctxt.buffer;
int_pkt->message_type.type =
PCI_DELETE_INTERRUPT_MESSAGE;
int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
@@ -2482,7 +2538,7 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
comp_pkt.hpdev = hpdev;
pkt.init_packet.compl_ctxt = &comp_pkt;
pkt.init_packet.completion_func = q_resource_requirements;
- res_req = (struct pci_child_message *)&pkt.init_packet.message;
+ res_req = (struct pci_child_message *)pkt.buffer;
res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS;
res_req->wslot.slot = desc->win_slot.slot;
@@ -2860,7 +2916,7 @@ static void hv_eject_device_work(struct work_struct *work)
pci_destroy_slot(hpdev->pci_slot);
memset(&ctxt, 0, sizeof(ctxt));
- ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
+ ejct_pkt = (struct pci_eject_response *)ctxt.buffer;
ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
vmbus_sendpacket(hbus->hdev->channel, ejct_pkt,
@@ -3118,7 +3174,7 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev,
init_completion(&comp_pkt.host_event);
pkt->completion_func = hv_pci_generic_compl;
pkt->compl_ctxt = &comp_pkt;
- version_req = (struct pci_version_request *)&pkt->message;
+ version_req = (struct pci_version_request *)(pkt + 1);
version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION;
for (i = 0; i < num_version; i++) {
@@ -3340,7 +3396,7 @@ enter_d0_retry:
init_completion(&comp_pkt.host_event);
pkt->completion_func = hv_pci_generic_compl;
pkt->compl_ctxt = &comp_pkt;
- d0_entry = (struct pci_bus_d0_entry *)&pkt->message;
+ d0_entry = (struct pci_bus_d0_entry *)(pkt + 1);
d0_entry->message_type.type = PCI_BUS_D0ENTRY;
d0_entry->mmio_base = hbus->mem_config->start;
@@ -3498,20 +3554,20 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
if (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2) {
res_assigned =
- (struct pci_resources_assigned *)&pkt->message;
+ (struct pci_resources_assigned *)(pkt + 1);
res_assigned->message_type.type =
PCI_RESOURCES_ASSIGNED;
res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
} else {
res_assigned2 =
- (struct pci_resources_assigned2 *)&pkt->message;
+ (struct pci_resources_assigned2 *)(pkt + 1);
res_assigned2->message_type.type =
PCI_RESOURCES_ASSIGNED2;
res_assigned2->wslot.slot = hpdev->desc.win_slot.slot;
}
put_pcichild(hpdev);
- ret = vmbus_sendpacket(hdev->channel, &pkt->message,
+ ret = vmbus_sendpacket(hdev->channel, pkt + 1,
size_res, (unsigned long)pkt,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
@@ -3809,6 +3865,7 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
struct pci_packet teardown_packet;
u8 buffer[sizeof(struct pci_message)];
} pkt;
+ struct pci_message *msg;
struct hv_pci_compl comp_pkt;
struct hv_pci_dev *hpdev, *tmp;
unsigned long flags;
@@ -3854,10 +3911,10 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
init_completion(&comp_pkt.host_event);
pkt.teardown_packet.completion_func = hv_pci_generic_compl;
pkt.teardown_packet.compl_ctxt = &comp_pkt;
- pkt.teardown_packet.message[0].type = PCI_BUS_D0EXIT;
+ msg = (struct pci_message *)pkt.buffer;
+ msg->type = PCI_BUS_D0EXIT;
- ret = vmbus_sendpacket_getid(chan, &pkt.teardown_packet.message,
- sizeof(struct pci_message),
+ ret = vmbus_sendpacket_getid(chan, msg, sizeof(*msg),
(unsigned long)&pkt.teardown_packet,
&trans_id, VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
diff --git a/drivers/platform/cznic/Kconfig b/drivers/platform/cznic/Kconfig
index 13e37b49d9d0..61cff5f7e02e 100644
--- a/drivers/platform/cznic/Kconfig
+++ b/drivers/platform/cznic/Kconfig
@@ -76,6 +76,23 @@ config TURRIS_OMNIA_MCU_TRNG
Say Y here to add support for the true random number generator
provided by CZ.NIC's Turris Omnia MCU.
+config TURRIS_OMNIA_MCU_KEYCTL
+ bool "Turris Omnia MCU ECDSA message signing"
+ default y
+ depends on KEYS
+ depends on ASYMMETRIC_KEY_TYPE
+ depends on TURRIS_OMNIA_MCU_GPIO
+ select TURRIS_SIGNING_KEY
+ help
+ Say Y here to add support for ECDSA message signing with the board's
+ private key (if available on the MCU). This is exposed via the
+ keyctl() syscall.
+
endif # TURRIS_OMNIA_MCU
+config TURRIS_SIGNING_KEY
+ tristate
+ depends on KEYS
+ depends on ASYMMETRIC_KEY_TYPE
+
endif # CZNIC_PLATFORMS
diff --git a/drivers/platform/cznic/Makefile b/drivers/platform/cznic/Makefile
index ce6d997f34d6..ccad7bec82e1 100644
--- a/drivers/platform/cznic/Makefile
+++ b/drivers/platform/cznic/Makefile
@@ -3,6 +3,9 @@
obj-$(CONFIG_TURRIS_OMNIA_MCU) += turris-omnia-mcu.o
turris-omnia-mcu-y := turris-omnia-mcu-base.o
turris-omnia-mcu-$(CONFIG_TURRIS_OMNIA_MCU_GPIO) += turris-omnia-mcu-gpio.o
+turris-omnia-mcu-$(CONFIG_TURRIS_OMNIA_MCU_KEYCTL) += turris-omnia-mcu-keyctl.o
turris-omnia-mcu-$(CONFIG_TURRIS_OMNIA_MCU_SYSOFF_WAKEUP) += turris-omnia-mcu-sys-off-wakeup.o
turris-omnia-mcu-$(CONFIG_TURRIS_OMNIA_MCU_TRNG) += turris-omnia-mcu-trng.o
turris-omnia-mcu-$(CONFIG_TURRIS_OMNIA_MCU_WATCHDOG) += turris-omnia-mcu-watchdog.o
+
+obj-$(CONFIG_TURRIS_SIGNING_KEY) += turris-signing-key.o
diff --git a/drivers/platform/cznic/turris-omnia-mcu-base.c b/drivers/platform/cznic/turris-omnia-mcu-base.c
index 770e680b96f9..e8fc0d7b3343 100644
--- a/drivers/platform/cznic/turris-omnia-mcu-base.c
+++ b/drivers/platform/cznic/turris-omnia-mcu-base.c
@@ -392,6 +392,10 @@ static int omnia_mcu_probe(struct i2c_client *client)
if (err)
return err;
+ err = omnia_mcu_register_keyctl(mcu);
+ if (err)
+ return err;
+
return omnia_mcu_register_trng(mcu);
}
diff --git a/drivers/platform/cznic/turris-omnia-mcu-gpio.c b/drivers/platform/cznic/turris-omnia-mcu-gpio.c
index 5f35f7c5d5d7..c2df24ea8686 100644
--- a/drivers/platform/cznic/turris-omnia-mcu-gpio.c
+++ b/drivers/platform/cznic/turris-omnia-mcu-gpio.c
@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/devm-helpers.h>
#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
@@ -195,7 +196,7 @@ static const struct omnia_gpio omnia_gpios[64] = {
};
/* mapping from interrupts to indexes of GPIOs in the omnia_gpios array */
-const u8 omnia_int_to_gpio_idx[32] = {
+static const u8 omnia_int_to_gpio_idx[32] = {
[__bf_shf(OMNIA_INT_CARD_DET)] = 4,
[__bf_shf(OMNIA_INT_MSATA_IND)] = 5,
[__bf_shf(OMNIA_INT_USB30_OVC)] = 6,
@@ -1093,3 +1094,21 @@ int omnia_mcu_register_gpiochip(struct omnia_mcu *mcu)
return 0;
}
+
+int omnia_mcu_request_irq(struct omnia_mcu *mcu, u32 spec,
+ irq_handler_t thread_fn, const char *devname)
+{
+ u8 irq_idx;
+ int irq;
+
+ if (!spec)
+ return -EINVAL;
+
+ irq_idx = omnia_int_to_gpio_idx[ffs(spec) - 1];
+ irq = gpiod_to_irq(gpio_device_get_desc(mcu->gc.gpiodev, irq_idx));
+ if (irq < 0)
+ return irq;
+
+ return devm_request_threaded_irq(&mcu->client->dev, irq, NULL,
+ thread_fn, IRQF_ONESHOT, devname, mcu);
+}
diff --git a/drivers/platform/cznic/turris-omnia-mcu-keyctl.c b/drivers/platform/cznic/turris-omnia-mcu-keyctl.c
new file mode 100644
index 000000000000..dc40f942f082
--- /dev/null
+++ b/drivers/platform/cznic/turris-omnia-mcu-keyctl.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CZ.NIC's Turris Omnia MCU ECDSA message signing via keyctl
+ *
+ * 2025 by Marek Behún <kabel@kernel.org>
+ */
+
+#include <crypto/sha2.h>
+#include <linux/cleanup.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/key.h>
+#include <linux/mutex.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <linux/turris-omnia-mcu-interface.h>
+#include <linux/turris-signing-key.h>
+#include "turris-omnia-mcu.h"
+
+static irqreturn_t omnia_msg_signed_irq_handler(int irq, void *dev_id)
+{
+ u8 reply[1 + OMNIA_MCU_CRYPTO_SIGNATURE_LEN];
+ struct omnia_mcu *mcu = dev_id;
+ int err;
+
+ err = omnia_cmd_read(mcu->client, OMNIA_CMD_CRYPTO_COLLECT_SIGNATURE,
+ reply, sizeof(reply));
+ if (!err && reply[0] != OMNIA_MCU_CRYPTO_SIGNATURE_LEN)
+ err = -EIO;
+
+ guard(mutex)(&mcu->sign_lock);
+
+ if (mcu->sign_requested) {
+ mcu->sign_err = err;
+ if (!err)
+ memcpy(mcu->signature, &reply[1],
+ OMNIA_MCU_CRYPTO_SIGNATURE_LEN);
+ mcu->sign_requested = false;
+ complete(&mcu->msg_signed);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int omnia_mcu_sign(const struct key *key, const void *msg,
+ void *signature)
+{
+ struct omnia_mcu *mcu = dev_get_drvdata(turris_signing_key_get_dev(key));
+ u8 cmd[1 + SHA256_DIGEST_SIZE], reply;
+ int err;
+
+ scoped_guard(mutex, &mcu->sign_lock) {
+ if (mcu->sign_requested)
+ return -EBUSY;
+
+ cmd[0] = OMNIA_CMD_CRYPTO_SIGN_MESSAGE;
+ memcpy(&cmd[1], msg, SHA256_DIGEST_SIZE);
+
+ err = omnia_cmd_write_read(mcu->client, cmd, sizeof(cmd),
+ &reply, 1);
+ if (err)
+ return err;
+
+ if (!reply)
+ return -EBUSY;
+
+ mcu->sign_requested = true;
+ }
+
+ if (wait_for_completion_interruptible(&mcu->msg_signed))
+ return -EINTR;
+
+ guard(mutex)(&mcu->sign_lock);
+
+ if (mcu->sign_err)
+ return mcu->sign_err;
+
+ memcpy(signature, mcu->signature, OMNIA_MCU_CRYPTO_SIGNATURE_LEN);
+
+ /* forget the signature, for security */
+ memzero_explicit(mcu->signature, sizeof(mcu->signature));
+
+ return OMNIA_MCU_CRYPTO_SIGNATURE_LEN;
+}
+
+static const void *omnia_mcu_get_public_key(const struct key *key)
+{
+ struct omnia_mcu *mcu = dev_get_drvdata(turris_signing_key_get_dev(key));
+
+ return mcu->board_public_key;
+}
+
+static const struct turris_signing_key_subtype omnia_signing_key_subtype = {
+ .key_size = 256,
+ .data_size = SHA256_DIGEST_SIZE,
+ .sig_size = OMNIA_MCU_CRYPTO_SIGNATURE_LEN,
+ .public_key_size = OMNIA_MCU_CRYPTO_PUBLIC_KEY_LEN,
+ .hash_algo = "sha256",
+ .get_public_key = omnia_mcu_get_public_key,
+ .sign = omnia_mcu_sign,
+};
+
+static int omnia_mcu_read_public_key(struct omnia_mcu *mcu)
+{
+ u8 reply[1 + OMNIA_MCU_CRYPTO_PUBLIC_KEY_LEN];
+ int err;
+
+ err = omnia_cmd_read(mcu->client, OMNIA_CMD_CRYPTO_GET_PUBLIC_KEY,
+ reply, sizeof(reply));
+ if (err)
+ return err;
+
+ if (reply[0] != OMNIA_MCU_CRYPTO_PUBLIC_KEY_LEN)
+ return -EIO;
+
+ memcpy(mcu->board_public_key, &reply[1],
+ OMNIA_MCU_CRYPTO_PUBLIC_KEY_LEN);
+
+ return 0;
+}
+
+int omnia_mcu_register_keyctl(struct omnia_mcu *mcu)
+{
+ struct device *dev = &mcu->client->dev;
+ char desc[48];
+ int err;
+
+ if (!(mcu->features & OMNIA_FEAT_CRYPTO))
+ return 0;
+
+ err = omnia_mcu_read_public_key(mcu);
+ if (err)
+ return dev_err_probe(dev, err,
+ "Cannot read board public key\n");
+
+ err = devm_mutex_init(dev, &mcu->sign_lock);
+ if (err)
+ return err;
+
+ init_completion(&mcu->msg_signed);
+
+ err = omnia_mcu_request_irq(mcu, OMNIA_INT_MESSAGE_SIGNED,
+ omnia_msg_signed_irq_handler,
+ "turris-omnia-mcu-keyctl");
+ if (err)
+ return dev_err_probe(dev, err,
+ "Cannot request MESSAGE_SIGNED IRQ\n");
+
+ sprintf(desc, "Turris Omnia SN %016llX MCU ECDSA key",
+ mcu->board_serial_number);
+
+ err = devm_turris_signing_key_create(dev, &omnia_signing_key_subtype,
+ desc);
+ if (err)
+ return dev_err_probe(dev, err, "Cannot create signing key\n");
+
+ return 0;
+}
diff --git a/drivers/platform/cznic/turris-omnia-mcu-trng.c b/drivers/platform/cznic/turris-omnia-mcu-trng.c
index 9a1d9292dc9a..e3826959e6de 100644
--- a/drivers/platform/cznic/turris-omnia-mcu-trng.c
+++ b/drivers/platform/cznic/turris-omnia-mcu-trng.c
@@ -5,12 +5,9 @@
* 2024 by Marek Behún <kabel@kernel.org>
*/
-#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/container_of.h>
#include <linux/errno.h>
-#include <linux/gpio/consumer.h>
-#include <linux/gpio/driver.h>
#include <linux/hw_random.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
@@ -62,17 +59,12 @@ static int omnia_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
int omnia_mcu_register_trng(struct omnia_mcu *mcu)
{
struct device *dev = &mcu->client->dev;
- u8 irq_idx, dummy;
- int irq, err;
+ u8 dummy;
+ int err;
if (!(mcu->features & OMNIA_FEAT_TRNG))
return 0;
- irq_idx = omnia_int_to_gpio_idx[__bf_shf(OMNIA_INT_TRNG)];
- irq = gpiod_to_irq(gpio_device_get_desc(mcu->gc.gpiodev, irq_idx));
- if (irq < 0)
- return dev_err_probe(dev, irq, "Cannot get TRNG IRQ\n");
-
/*
* If someone else cleared the TRNG interrupt but did not read the
* entropy, a new interrupt won't be generated, and entropy collection
@@ -86,9 +78,8 @@ int omnia_mcu_register_trng(struct omnia_mcu *mcu)
init_completion(&mcu->trng_entropy_ready);
- err = devm_request_threaded_irq(dev, irq, NULL, omnia_trng_irq_handler,
- IRQF_ONESHOT, "turris-omnia-mcu-trng",
- mcu);
+ err = omnia_mcu_request_irq(mcu, OMNIA_INT_TRNG, omnia_trng_irq_handler,
+ "turris-omnia-mcu-trng");
if (err)
return dev_err_probe(dev, err, "Cannot request TRNG IRQ\n");
diff --git a/drivers/platform/cznic/turris-omnia-mcu.h b/drivers/platform/cznic/turris-omnia-mcu.h
index 088541be3f4c..8473a3031917 100644
--- a/drivers/platform/cznic/turris-omnia-mcu.h
+++ b/drivers/platform/cznic/turris-omnia-mcu.h
@@ -12,11 +12,17 @@
#include <linux/gpio/driver.h>
#include <linux/hw_random.h>
#include <linux/if_ether.h>
+#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/watchdog.h>
#include <linux/workqueue.h>
+enum {
+ OMNIA_MCU_CRYPTO_PUBLIC_KEY_LEN = 1 + 32,
+ OMNIA_MCU_CRYPTO_SIGNATURE_LEN = 64,
+};
+
struct i2c_client;
struct rtc_device;
@@ -55,6 +61,12 @@ struct rtc_device;
* @wdt: watchdog driver structure
* @trng: RNG driver structure
* @trng_entropy_ready: RNG entropy ready completion
+ * @msg_signed: message signed completion
+ * @sign_lock: mutex to protect message signing state
+ * @sign_requested: flag indicating that message signing was requested but not completed
+ * @sign_err: message signing error number, filled in interrupt handler
+ * @signature: message signing signature, filled in interrupt handler
+ * @board_public_key: board public key, if stored in MCU
*/
struct omnia_mcu {
struct i2c_client *client;
@@ -88,12 +100,22 @@ struct omnia_mcu {
struct hwrng trng;
struct completion trng_entropy_ready;
#endif
+
+#ifdef CONFIG_TURRIS_OMNIA_MCU_KEYCTL
+ struct completion msg_signed;
+ struct mutex sign_lock;
+ bool sign_requested;
+ int sign_err;
+ u8 signature[OMNIA_MCU_CRYPTO_SIGNATURE_LEN];
+ u8 board_public_key[OMNIA_MCU_CRYPTO_PUBLIC_KEY_LEN];
+#endif
};
#ifdef CONFIG_TURRIS_OMNIA_MCU_GPIO
-extern const u8 omnia_int_to_gpio_idx[32];
extern const struct attribute_group omnia_mcu_gpio_group;
int omnia_mcu_register_gpiochip(struct omnia_mcu *mcu);
+int omnia_mcu_request_irq(struct omnia_mcu *mcu, u32 spec,
+ irq_handler_t thread_fn, const char *devname);
#else
static inline int omnia_mcu_register_gpiochip(struct omnia_mcu *mcu)
{
@@ -101,6 +123,15 @@ static inline int omnia_mcu_register_gpiochip(struct omnia_mcu *mcu)
}
#endif
+#ifdef CONFIG_TURRIS_OMNIA_MCU_KEYCTL
+int omnia_mcu_register_keyctl(struct omnia_mcu *mcu);
+#else
+static inline int omnia_mcu_register_keyctl(struct omnia_mcu *mcu)
+{
+ return 0;
+}
+#endif
+
#ifdef CONFIG_TURRIS_OMNIA_MCU_SYSOFF_WAKEUP
extern const struct attribute_group omnia_mcu_poweroff_group;
int omnia_mcu_register_sys_off_and_wakeup(struct omnia_mcu *mcu);
diff --git a/drivers/platform/cznic/turris-signing-key.c b/drivers/platform/cznic/turris-signing-key.c
new file mode 100644
index 000000000000..3827178565e2
--- /dev/null
+++ b/drivers/platform/cznic/turris-signing-key.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Some of CZ.NIC's Turris devices support signing messages with a per-device unique asymmetric
+ * cryptographic key that was burned into the device at manufacture.
+ *
+ * This helper module exposes this message signing ability via the keyctl() syscall. Upon load, it
+ * creates the `.turris-signing-keys` keyring. A device-specific driver then has to create a signing
+ * key by calling devm_turris_signing_key_create().
+ *
+ * 2025 by Marek Behún <kabel@kernel.org>
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/key-type.h>
+#include <linux/key.h>
+#include <linux/keyctl.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <linux/turris-signing-key.h>
+
+static int turris_signing_key_instantiate(struct key *key,
+ struct key_preparsed_payload *payload)
+{
+ return 0;
+}
+
+static void turris_signing_key_describe(const struct key *key, struct seq_file *m)
+{
+ const struct turris_signing_key_subtype *subtype = dereference_key_rcu(key);
+
+ if (!subtype)
+ return;
+
+ seq_printf(m, "%s: %*phN", key->description, subtype->public_key_size,
+ subtype->get_public_key(key));
+}
+
+static long turris_signing_key_read(const struct key *key, char *buffer, size_t buflen)
+{
+ const struct turris_signing_key_subtype *subtype = dereference_key_rcu(key);
+
+ if (!subtype)
+ return -EIO;
+
+ if (buffer) {
+ if (buflen > subtype->public_key_size)
+ buflen = subtype->public_key_size;
+
+ memcpy(buffer, subtype->get_public_key(key), buflen);
+ }
+
+ return subtype->public_key_size;
+}
+
+static bool turris_signing_key_asym_valid_params(const struct turris_signing_key_subtype *subtype,
+ const struct kernel_pkey_params *params)
+{
+ if (params->encoding && strcmp(params->encoding, "raw"))
+ return false;
+
+ if (params->hash_algo && strcmp(params->hash_algo, subtype->hash_algo))
+ return false;
+
+ return true;
+}
+
+static int turris_signing_key_asym_query(const struct kernel_pkey_params *params,
+ struct kernel_pkey_query *info)
+{
+ const struct turris_signing_key_subtype *subtype = dereference_key_rcu(params->key);
+
+ if (!subtype)
+ return -EIO;
+
+ if (!turris_signing_key_asym_valid_params(subtype, params))
+ return -EINVAL;
+
+ info->supported_ops = KEYCTL_SUPPORTS_SIGN;
+ info->key_size = subtype->key_size;
+ info->max_data_size = subtype->data_size;
+ info->max_sig_size = subtype->sig_size;
+ info->max_enc_size = 0;
+ info->max_dec_size = 0;
+
+ return 0;
+}
+
+static int turris_signing_key_asym_eds_op(struct kernel_pkey_params *params,
+ const void *in, void *out)
+{
+ const struct turris_signing_key_subtype *subtype = dereference_key_rcu(params->key);
+ int err;
+
+ if (!subtype)
+ return -EIO;
+
+ if (!turris_signing_key_asym_valid_params(subtype, params))
+ return -EINVAL;
+
+ if (params->op != kernel_pkey_sign)
+ return -EOPNOTSUPP;
+
+ if (params->in_len != subtype->data_size || params->out_len != subtype->sig_size)
+ return -EINVAL;
+
+ err = subtype->sign(params->key, in, out);
+ if (err)
+ return err;
+
+ return subtype->sig_size;
+}
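From userspace this operation maps onto KEYCTL_PKEY_SIGN. A minimal sketch using libkeyutils — the key serial is assumed to have been located beforehand (e.g. from /proc/keys), the sizes match the Omnia subtype above (32-byte SHA-256 digest in, 64-byte signature out), and the info string satisfies the "raw"/hash_algo checks in turris_signing_key_asym_valid_params():

#include <keyutils.h>
#include <stdio.h>

static int sign_digest(key_serial_t key, const unsigned char digest[32])
{
	unsigned char sig[64];
	long n;

	n = keyctl_pkey_sign(key, "enc=raw hash=sha256",
			     digest, 32, sig, sizeof(sig));
	if (n < 0) {
		perror("keyctl_pkey_sign");
		return -1;
	}
	printf("got a %ld-byte signature\n", n);
	return 0;
}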
+
+static struct key_type turris_signing_key_type = {
+ .name = "turris-signing-key",
+ .instantiate = turris_signing_key_instantiate,
+ .describe = turris_signing_key_describe,
+ .read = turris_signing_key_read,
+ .asym_query = turris_signing_key_asym_query,
+ .asym_eds_op = turris_signing_key_asym_eds_op,
+};
+
+static struct key *turris_signing_keyring;
+
+static void turris_signing_key_release(void *key)
+{
+ key_unlink(turris_signing_keyring, key);
+ key_put(key);
+}
+
+int
+devm_turris_signing_key_create(struct device *dev, const struct turris_signing_key_subtype *subtype,
+ const char *desc)
+{
+ struct key *key;
+ key_ref_t kref;
+
+ kref = key_create(make_key_ref(turris_signing_keyring, true),
+ turris_signing_key_type.name, desc, NULL, 0,
+ (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW | KEY_USR_READ |
+ KEY_USR_SEARCH,
+ KEY_ALLOC_BUILT_IN | KEY_ALLOC_SET_KEEP | KEY_ALLOC_NOT_IN_QUOTA);
+ if (IS_ERR(kref))
+ return PTR_ERR(kref);
+
+ key = key_ref_to_ptr(kref);
+ key->payload.data[1] = dev;
+ rcu_assign_keypointer(key, subtype);
+
+ return devm_add_action_or_reset(dev, turris_signing_key_release, key);
+}
+EXPORT_SYMBOL_GPL(devm_turris_signing_key_create);
+
+static int turris_signing_key_init(void)
+{
+ int err;
+
+ err = register_key_type(&turris_signing_key_type);
+ if (err)
+ return err;
+
+ turris_signing_keyring = keyring_alloc(".turris-signing-keys",
+ GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(),
+ (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW |
+ KEY_USR_READ | KEY_USR_SEARCH,
+ KEY_ALLOC_BUILT_IN | KEY_ALLOC_SET_KEEP |
+ KEY_ALLOC_NOT_IN_QUOTA,
+ NULL, NULL);
+ if (IS_ERR(turris_signing_keyring)) {
+ pr_err("Cannot allocate Turris keyring\n");
+
+ unregister_key_type(&turris_signing_key_type);
+
+ return PTR_ERR(turris_signing_keyring);
+ }
+
+ return 0;
+}
+module_init(turris_signing_key_init);
+
+static void turris_signing_key_exit(void)
+{
+ key_put(turris_signing_keyring);
+ unregister_key_type(&turris_signing_key_type);
+}
+module_exit(turris_signing_key_exit);
+
+MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
+MODULE_DESCRIPTION("CZ.NIC's Turris signing key helper");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/qcom_pmi8998_charger.c b/drivers/power/supply/qcom_pmi8998_charger.c
index 74a8d8ed8d9f..c2f8f2e24398 100644
--- a/drivers/power/supply/qcom_pmi8998_charger.c
+++ b/drivers/power/supply/qcom_pmi8998_charger.c
@@ -2,7 +2,7 @@
/*
* Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2023, Linaro Ltd.
- * Author: Caleb Connolly <caleb.connolly@linaro.org>
+ * Author: Casey Connolly <casey.connolly@linaro.org>
*
* This driver is for the switch-mode battery charger and boost
* hardware found in pmi8998 and related PMICs.
@@ -1045,6 +1045,6 @@ static struct platform_driver qcom_spmi_smb2 = {
module_platform_driver(qcom_spmi_smb2);
-MODULE_AUTHOR("Caleb Connolly <caleb.connolly@linaro.org>");
+MODULE_AUTHOR("Casey Connolly <casey.connolly@linaro.org>");
MODULE_DESCRIPTION("Qualcomm SMB2 Charger Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
index 5832dce8ed9d..4789eafb8bac 100644
--- a/drivers/pwm/pwm-stm32-lp.c
+++ b/drivers/pwm/pwm-stm32-lp.c
@@ -20,6 +20,7 @@
struct stm32_pwm_lp {
struct clk *clk;
struct regmap *regmap;
+ unsigned int num_cc_chans;
};
static inline struct stm32_pwm_lp *to_stm32_pwm_lp(struct pwm_chip *chip)
@@ -30,13 +31,101 @@ static inline struct stm32_pwm_lp *to_stm32_pwm_lp(struct pwm_chip *chip)
/* STM32 Low-Power Timer is preceded by a configurable power-of-2 prescaler */
#define STM32_LPTIM_MAX_PRESCALER 128
+static int stm32_pwm_lp_update_allowed(struct stm32_pwm_lp *priv, int channel)
+{
+ int ret;
+ u32 ccmr1;
+ unsigned long ccmr;
+
+ /* Only one PWM on this LPTIMER: enable, prescaler and reload value can be changed */
+ if (!priv->num_cc_chans)
+ return true;
+
+ ret = regmap_read(priv->regmap, STM32_LPTIM_CCMR1, &ccmr1);
+ if (ret)
+ return ret;
+ ccmr = ccmr1 & (STM32_LPTIM_CC1E | STM32_LPTIM_CC2E);
+
+ /* More than one channel enabled: enable, prescaler or ARR value can't be changed */
+ if (bitmap_weight(&ccmr, sizeof(u32) * BITS_PER_BYTE) > 1)
+ return false;
+
+ /*
+ * Only one channel is enabled (or none): check the other channel's
+ * status to report whether the enable, prescaler or ARR value can be changed.
+ */
+ if (channel)
+ return !(ccmr1 & STM32_LPTIM_CC1E);
+ else
+ return !(ccmr1 & STM32_LPTIM_CC2E);
+}
+
+static int stm32_pwm_lp_compare_channel_apply(struct stm32_pwm_lp *priv, int channel,
+ bool enable, enum pwm_polarity polarity)
+{
+ u32 ccmr1, val, mask;
+ bool reenable;
+ int ret;
+
+ /* No dedicated CC channel: nothing to do */
+ if (!priv->num_cc_chans)
+ return 0;
+
+ ret = regmap_read(priv->regmap, STM32_LPTIM_CCMR1, &ccmr1);
+ if (ret)
+ return ret;
+
+ if (channel) {
+ /* Must disable CC channel (CCxE) to modify polarity (CCxP), then re-enable */
+ reenable = (enable && FIELD_GET(STM32_LPTIM_CC2E, ccmr1)) &&
+ (polarity != FIELD_GET(STM32_LPTIM_CC2P, ccmr1));
+
+ mask = STM32_LPTIM_CC2SEL | STM32_LPTIM_CC2E | STM32_LPTIM_CC2P;
+ val = FIELD_PREP(STM32_LPTIM_CC2P, polarity);
+ val |= FIELD_PREP(STM32_LPTIM_CC2E, enable);
+ } else {
+ reenable = (enable && FIELD_GET(STM32_LPTIM_CC1E, ccmr1)) &&
+ (polarity != FIELD_GET(STM32_LPTIM_CC1P, ccmr1));
+
+ mask = STM32_LPTIM_CC1SEL | STM32_LPTIM_CC1E | STM32_LPTIM_CC1P;
+ val = FIELD_PREP(STM32_LPTIM_CC1P, polarity);
+ val |= FIELD_PREP(STM32_LPTIM_CC1E, enable);
+ }
+
+ if (reenable) {
+ u32 cfgr, presc;
+ unsigned long rate;
+ unsigned int delay_us;
+
+ ret = regmap_update_bits(priv->regmap, STM32_LPTIM_CCMR1,
+ channel ? STM32_LPTIM_CC2E : STM32_LPTIM_CC1E, 0);
+ if (ret)
+ return ret;
+ /*
+ * After a write to the LPTIM_CCMRx register, a new write operation can only be
+ * performed after a delay of at least (PRESC × 3) clock cycles.
+ */
+ ret = regmap_read(priv->regmap, STM32_LPTIM_CFGR, &cfgr);
+ if (ret)
+ return ret;
+ presc = FIELD_GET(STM32_LPTIM_PRESC, cfgr);
+ rate = clk_get_rate(priv->clk) >> presc;
+ if (!rate)
+ return -EINVAL;
+ delay_us = 3 * DIV_ROUND_UP(USEC_PER_SEC, rate);
+ usleep_range(delay_us, delay_us * 2);
+ }
+
+ return regmap_update_bits(priv->regmap, STM32_LPTIM_CCMR1, mask, val);
+}
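To make the re-enable delay concrete, assume a 32.768 kHz LSE parent clock and a PRESC field value of 3 (i.e. the input clock divided by 8); the computation above then works out as:

rate     = 32768 >> 3;					/* 4096 Hz */
delay_us = 3 * DIV_ROUND_UP(USEC_PER_SEC, rate);	/* 3 * 245 = 735 us */
usleep_range(735, 1470);				/* delay_us .. 2 * delay_us */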
+
static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
struct stm32_pwm_lp *priv = to_stm32_pwm_lp(chip);
unsigned long long prd, div, dty;
struct pwm_state cstate;
- u32 val, mask, cfgr, presc = 0;
+ u32 arr, val, mask, cfgr, presc = 0;
bool reenable;
int ret;
@@ -45,10 +134,28 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (!state->enabled) {
if (cstate.enabled) {
- /* Disable LP timer */
- ret = regmap_write(priv->regmap, STM32_LPTIM_CR, 0);
+ /* Disable CC channel if any */
+ ret = stm32_pwm_lp_compare_channel_apply(priv, pwm->hwpwm, false,
+ state->polarity);
if (ret)
return ret;
+ ret = regmap_write(priv->regmap, pwm->hwpwm ?
+ STM32_LPTIM_CCR2 : STM32_LPTIM_CMP, 0);
+ if (ret)
+ return ret;
+
+ /* Check if the timer can be disabled */
+ ret = stm32_pwm_lp_update_allowed(priv, pwm->hwpwm);
+ if (ret < 0)
+ return ret;
+
+ if (ret) {
+ /* Disable LP timer */
+ ret = regmap_write(priv->regmap, STM32_LPTIM_CR, 0);
+ if (ret)
+ return ret;
+ }
+
/* disable clock to PWM counter */
clk_disable(priv->clk);
}
@@ -79,6 +186,23 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm,
dty = prd * state->duty_cycle;
do_div(dty, state->period);
+ ret = regmap_read(priv->regmap, STM32_LPTIM_CFGR, &cfgr);
+ if (ret)
+ return ret;
+
+ /*
+ * When there are several channels, they share the same prescaler and reload value.
+ * Check whether they can be changed; if not, the requested values must
+ * match the current ones.
+ */
+ if (!stm32_pwm_lp_update_allowed(priv, pwm->hwpwm)) {
+ ret = regmap_read(priv->regmap, STM32_LPTIM_ARR, &arr);
+ if (ret)
+ return ret;
+
+ if ((FIELD_GET(STM32_LPTIM_PRESC, cfgr) != presc) || (arr != prd - 1))
+ return -EBUSY;
+ }
+
if (!cstate.enabled) {
/* enable clock to drive PWM counter */
ret = clk_enable(priv->clk);
@@ -86,15 +210,20 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return ret;
}
- ret = regmap_read(priv->regmap, STM32_LPTIM_CFGR, &cfgr);
- if (ret)
- goto err;
-
if ((FIELD_GET(STM32_LPTIM_PRESC, cfgr) != presc) ||
- (FIELD_GET(STM32_LPTIM_WAVPOL, cfgr) != state->polarity)) {
+ ((FIELD_GET(STM32_LPTIM_WAVPOL, cfgr) != state->polarity) && !priv->num_cc_chans)) {
val = FIELD_PREP(STM32_LPTIM_PRESC, presc);
- val |= FIELD_PREP(STM32_LPTIM_WAVPOL, state->polarity);
- mask = STM32_LPTIM_PRESC | STM32_LPTIM_WAVPOL;
+ mask = STM32_LPTIM_PRESC;
+
+ if (!priv->num_cc_chans) {
+ /*
+ * WAVPOL bit is only available when no capature compare channel is used,
+ * e.g. on LPTIMER instances that have only one output channel. CCMR1 is
+ * used otherwise.
+ */
+ val |= FIELD_PREP(STM32_LPTIM_WAVPOL, state->polarity);
+ mask |= STM32_LPTIM_WAVPOL;
+ }
/* Must disable LP timer to modify CFGR */
reenable = true;
@@ -120,20 +249,27 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (ret)
goto err;
- ret = regmap_write(priv->regmap, STM32_LPTIM_CMP, prd - (1 + dty));
+ /* Write CMP/CCRx register and ensure it's been properly written */
+ ret = regmap_write(priv->regmap, pwm->hwpwm ? STM32_LPTIM_CCR2 : STM32_LPTIM_CMP,
+ prd - (1 + dty));
if (ret)
goto err;
- /* ensure CMP & ARR registers are properly written */
- ret = regmap_read_poll_timeout(priv->regmap, STM32_LPTIM_ISR, val,
+ /* ensure ARR and CMP/CCRx registers are properly written */
+ ret = regmap_read_poll_timeout(priv->regmap, STM32_LPTIM_ISR, val, pwm->hwpwm ?
+ (val & STM32_LPTIM_CMP2_ARROK) == STM32_LPTIM_CMP2_ARROK :
(val & STM32_LPTIM_CMPOK_ARROK) == STM32_LPTIM_CMPOK_ARROK,
100, 1000);
if (ret) {
dev_err(pwmchip_parent(chip), "ARR/CMP registers write issue\n");
goto err;
}
- ret = regmap_write(priv->regmap, STM32_LPTIM_ICR,
- STM32_LPTIM_CMPOKCF_ARROKCF);
+ ret = regmap_write(priv->regmap, STM32_LPTIM_ICR, pwm->hwpwm ?
+ STM32_LPTIM_CMP2OKCF_ARROKCF : STM32_LPTIM_CMPOKCF_ARROKCF);
+ if (ret)
+ goto err;
+
+ ret = stm32_pwm_lp_compare_channel_apply(priv, pwm->hwpwm, true, state->polarity);
if (ret)
goto err;
@@ -161,11 +297,22 @@ static int stm32_pwm_lp_get_state(struct pwm_chip *chip,
{
struct stm32_pwm_lp *priv = to_stm32_pwm_lp(chip);
unsigned long rate = clk_get_rate(priv->clk);
- u32 val, presc, prd;
+ u32 val, presc, prd, ccmr1;
+ bool enabled;
u64 tmp;
regmap_read(priv->regmap, STM32_LPTIM_CR, &val);
- state->enabled = !!FIELD_GET(STM32_LPTIM_ENABLE, val);
+ enabled = !!FIELD_GET(STM32_LPTIM_ENABLE, val);
+ if (priv->num_cc_chans) {
+ /* There's a CC channel, so we also need to check whether it's enabled */
+ regmap_read(priv->regmap, STM32_LPTIM_CCMR1, &ccmr1);
+ if (pwm->hwpwm)
+ enabled &= !!FIELD_GET(STM32_LPTIM_CC2E, ccmr1);
+ else
+ enabled &= !!FIELD_GET(STM32_LPTIM_CC1E, ccmr1);
+ }
+ state->enabled = enabled;
+
/* Keep PWM counter clock refcount in sync with PWM initial state */
if (state->enabled) {
int ret = clk_enable(priv->clk);
@@ -176,14 +323,21 @@ static int stm32_pwm_lp_get_state(struct pwm_chip *chip,
regmap_read(priv->regmap, STM32_LPTIM_CFGR, &val);
presc = FIELD_GET(STM32_LPTIM_PRESC, val);
- state->polarity = FIELD_GET(STM32_LPTIM_WAVPOL, val);
+ if (priv->num_cc_chans) {
+ if (pwm->hwpwm)
+ state->polarity = FIELD_GET(STM32_LPTIM_CC2P, ccmr1);
+ else
+ state->polarity = FIELD_GET(STM32_LPTIM_CC1P, ccmr1);
+ } else {
+ state->polarity = FIELD_GET(STM32_LPTIM_WAVPOL, val);
+ }
regmap_read(priv->regmap, STM32_LPTIM_ARR, &prd);
tmp = prd + 1;
tmp = (tmp << presc) * NSEC_PER_SEC;
state->period = DIV_ROUND_CLOSEST_ULL(tmp, rate);
- regmap_read(priv->regmap, STM32_LPTIM_CMP, &val);
+ regmap_read(priv->regmap, pwm->hwpwm ? STM32_LPTIM_CCR2 : STM32_LPTIM_CMP, &val);
tmp = prd - val;
tmp = (tmp << presc) * NSEC_PER_SEC;
state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, rate);
@@ -201,15 +355,25 @@ static int stm32_pwm_lp_probe(struct platform_device *pdev)
struct stm32_lptimer *ddata = dev_get_drvdata(pdev->dev.parent);
struct stm32_pwm_lp *priv;
struct pwm_chip *chip;
+ unsigned int npwm;
int ret;
- chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*priv));
+ if (!ddata->num_cc_chans) {
+ /* No dedicated CC channel, so there's only one PWM channel */
+ npwm = 1;
+ } else {
+ /* There are dedicated CC channels, each with one PWM output */
+ npwm = ddata->num_cc_chans;
+ }
+
+ chip = devm_pwmchip_alloc(&pdev->dev, npwm, sizeof(*priv));
if (IS_ERR(chip))
return PTR_ERR(chip);
priv = to_stm32_pwm_lp(chip);
priv->regmap = ddata->regmap;
priv->clk = ddata->clk;
+ priv->num_cc_chans = ddata->num_cc_chans;
chip->ops = &stm32_pwm_lp_ops;
ret = devm_pwmchip_add(&pdev->dev, chip);
@@ -225,12 +389,15 @@ static int stm32_pwm_lp_suspend(struct device *dev)
{
struct pwm_chip *chip = dev_get_drvdata(dev);
struct pwm_state state;
-
- pwm_get_state(&chip->pwms[0], &state);
- if (state.enabled) {
- dev_err(dev, "The consumer didn't stop us (%s)\n",
- chip->pwms[0].label);
- return -EBUSY;
+ unsigned int i;
+
+ for (i = 0; i < chip->npwm; i++) {
+ pwm_get_state(&chip->pwms[i], &state);
+ if (state.enabled) {
+ dev_err(dev, "The consumer didn't stop us (%s)\n",
+ chip->pwms[i].label);
+ return -EBUSY;
+ }
}
return pinctrl_pm_select_sleep_state(dev);
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index cbf531d0ba68..995cfeca972b 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -98,18 +98,6 @@ MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
#endif
/*
- * An internal DMA coherent buffer
- */
-struct mport_dma_buf {
- void *ib_base;
- dma_addr_t ib_phys;
- u32 ib_size;
- u64 ib_rio_base;
- bool ib_map;
- struct file *filp;
-};
-
-/*
* Internal memory mapping structure
*/
enum rio_mport_map_dir {
@@ -131,14 +119,6 @@ struct rio_mport_mapping {
struct file *filp;
};
-struct rio_mport_dma_map {
- int valid;
- u64 length;
- void *vaddr;
- dma_addr_t paddr;
-};
-
-#define MPORT_MAX_DMA_BUFS 16
#define MPORT_EVENT_DEPTH 10
/*
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 9544b8ee0c96..46daf32ea13b 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -1775,19 +1775,6 @@ struct dma_chan *rio_request_mport_dma(struct rio_mport *mport)
EXPORT_SYMBOL_GPL(rio_request_mport_dma);
/**
- * rio_request_dma - request RapidIO capable DMA channel that supports
- * specified target RapidIO device.
- * @rdev: RIO device associated with DMA transfer
- *
- * Returns pointer to allocated DMA channel or NULL if failed.
- */
-struct dma_chan *rio_request_dma(struct rio_dev *rdev)
-{
- return rio_request_mport_dma(rdev->net->hport);
-}
-EXPORT_SYMBOL_GPL(rio_request_dma);
-
-/**
* rio_release_dma - release specified DMA channel
* @dchan: DMA channel to release
*/
@@ -1834,57 +1821,9 @@ struct dma_async_tx_descriptor *rio_dma_prep_xfer(struct dma_chan *dchan,
}
EXPORT_SYMBOL_GPL(rio_dma_prep_xfer);
-/**
- * rio_dma_prep_slave_sg - RapidIO specific wrapper
- * for device_prep_slave_sg callback defined by DMAENGINE.
- * @rdev: RIO device control structure
- * @dchan: DMA channel to configure
- * @data: RIO specific data descriptor
- * @direction: DMA data transfer direction (TO or FROM the device)
- * @flags: dmaengine defined flags
- *
- * Initializes RapidIO capable DMA channel for the specified data transfer.
- * Uses DMA channel private extension to pass information related to remote
- * target RIO device.
- *
- * Returns: pointer to DMA transaction descriptor if successful,
- * error-valued pointer or NULL if failed.
- */
-struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev,
- struct dma_chan *dchan, struct rio_dma_data *data,
- enum dma_transfer_direction direction, unsigned long flags)
-{
- return rio_dma_prep_xfer(dchan, rdev->destid, data, direction, flags);
-}
-EXPORT_SYMBOL_GPL(rio_dma_prep_slave_sg);
-
#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
/**
- * rio_find_mport - find RIO mport by its ID
- * @mport_id: number (ID) of mport device
- *
- * Given a RIO mport number, the desired mport is located
- * in the global list of mports. If the mport is found, a pointer to its
- * data structure is returned. If no mport is found, %NULL is returned.
- */
-struct rio_mport *rio_find_mport(int mport_id)
-{
- struct rio_mport *port;
-
- mutex_lock(&rio_mport_list_lock);
- list_for_each_entry(port, &rio_mports, node) {
- if (port->id == mport_id)
- goto found;
- }
- port = NULL;
-found:
- mutex_unlock(&rio_mport_list_lock);
-
- return port;
-}
-
-/**
* rio_register_scan - enumeration/discovery method registration interface
* @mport_id: mport device ID for which fabric scan routine has to be set
* (RIO_MPORT_ANY = set for all available mports)
@@ -1962,48 +1901,6 @@ err_out:
EXPORT_SYMBOL_GPL(rio_register_scan);
/**
- * rio_unregister_scan - removes enumeration/discovery method from mport
- * @mport_id: mport device ID for which fabric scan routine has to be
- * unregistered (RIO_MPORT_ANY = apply to all mports that use
- * the specified scan_ops)
- * @scan_ops: enumeration/discovery operations structure
- *
- * Removes enumeration or discovery method assigned to the specified mport
- * device. If RIO_MPORT_ANY is specified, removes the specified operations from
- * all mports that have them attached.
- */
-int rio_unregister_scan(int mport_id, struct rio_scan *scan_ops)
-{
- struct rio_mport *port;
- struct rio_scan_node *scan;
-
- pr_debug("RIO: %s for mport_id=%d\n", __func__, mport_id);
-
- if (mport_id != RIO_MPORT_ANY && mport_id >= RIO_MAX_MPORTS)
- return -EINVAL;
-
- mutex_lock(&rio_mport_list_lock);
-
- list_for_each_entry(port, &rio_mports, node)
- if (port->id == mport_id ||
- (mport_id == RIO_MPORT_ANY && port->nscan == scan_ops))
- port->nscan = NULL;
-
- list_for_each_entry(scan, &rio_scans, node) {
- if (scan->mport_id == mport_id) {
- list_del(&scan->node);
- kfree(scan);
- break;
- }
- }
-
- mutex_unlock(&rio_mport_list_lock);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(rio_unregister_scan);
-
-/**
* rio_mport_scan - execute enumeration/discovery on the specified mport
* @mport_id: number (ID) of mport device
*/
diff --git a/drivers/rapidio/rio.h b/drivers/rapidio/rio.h
index f482de0d0370..a0e2a09ddb8e 100644
--- a/drivers/rapidio/rio.h
+++ b/drivers/rapidio/rio.h
@@ -41,9 +41,7 @@ extern void rio_del_device(struct rio_dev *rdev, enum rio_device_state state);
extern int rio_enable_rx_tx_port(struct rio_mport *port, int local, u16 destid,
u8 hopcount, u8 port_num);
extern int rio_register_scan(int mport_id, struct rio_scan *scan_ops);
-extern int rio_unregister_scan(int mport_id, struct rio_scan *scan_ops);
extern void rio_attach_device(struct rio_dev *rdev);
-extern struct rio_mport *rio_find_mport(int mport_id);
extern int rio_mport_scan(int mport_id);
/* Structures internal to the RIO core code */
diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c
index 9135227301c8..97287e838ce1 100644
--- a/drivers/rapidio/rio_cm.c
+++ b/drivers/rapidio/rio_cm.c
@@ -198,12 +198,6 @@ struct cm_peer {
struct rio_dev *rdev;
};
-struct rio_cm_work {
- struct work_struct work;
- struct cm_dev *cm;
- void *data;
-};
-
struct conn_req {
struct list_head node;
u32 destid; /* requester destID */
diff --git a/drivers/regulator/bcm590xx-regulator.c b/drivers/regulator/bcm590xx-regulator.c
index 9f0cda46b015..50414f4cb109 100644
--- a/drivers/regulator/bcm590xx-regulator.c
+++ b/drivers/regulator/bcm590xx-regulator.c
@@ -18,112 +18,236 @@
#include <linux/regulator/of_regulator.h>
#include <linux/slab.h>
-/* I2C slave 0 registers */
-#define BCM590XX_RFLDOPMCTRL1 0x60
-#define BCM590XX_IOSR1PMCTRL1 0x7a
-#define BCM590XX_IOSR2PMCTRL1 0x7c
-#define BCM590XX_CSRPMCTRL1 0x7e
-#define BCM590XX_SDSR1PMCTRL1 0x82
-#define BCM590XX_SDSR2PMCTRL1 0x86
-#define BCM590XX_MSRPMCTRL1 0x8a
-#define BCM590XX_VSRPMCTRL1 0x8e
-#define BCM590XX_RFLDOCTRL 0x96
-#define BCM590XX_CSRVOUT1 0xc0
-
-/* I2C slave 1 registers */
-#define BCM590XX_GPLDO5PMCTRL1 0x16
-#define BCM590XX_GPLDO6PMCTRL1 0x18
-#define BCM590XX_GPLDO1CTRL 0x1a
-#define BCM590XX_GPLDO2CTRL 0x1b
-#define BCM590XX_GPLDO3CTRL 0x1c
-#define BCM590XX_GPLDO4CTRL 0x1d
-#define BCM590XX_GPLDO5CTRL 0x1e
-#define BCM590XX_GPLDO6CTRL 0x1f
-#define BCM590XX_OTG_CTRL 0x40
-#define BCM590XX_GPLDO1PMCTRL1 0x57
-#define BCM590XX_GPLDO2PMCTRL1 0x59
-#define BCM590XX_GPLDO3PMCTRL1 0x5b
-#define BCM590XX_GPLDO4PMCTRL1 0x5d
-
#define BCM590XX_REG_ENABLE BIT(7)
#define BCM590XX_VBUS_ENABLE BIT(2)
#define BCM590XX_LDO_VSEL_MASK GENMASK(5, 3)
#define BCM590XX_SR_VSEL_MASK GENMASK(5, 0)
+enum bcm590xx_reg_type {
+ BCM590XX_REG_TYPE_LDO,
+ BCM590XX_REG_TYPE_GPLDO,
+ BCM590XX_REG_TYPE_SR,
+ BCM590XX_REG_TYPE_VBUS
+};
+
+struct bcm590xx_reg_data {
+ enum bcm590xx_reg_type type;
+ enum bcm590xx_regmap_type regmap;
+ const struct regulator_desc desc;
+};
+
+struct bcm590xx_reg {
+ struct bcm590xx *mfd;
+ unsigned int n_regulators;
+ const struct bcm590xx_reg_data *regs;
+};
+
+static const struct regulator_ops bcm590xx_ops_ldo = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_iterate,
+};
+
+/*
+ * LDO ops without voltage selection, used for MICLDO on BCM59054.
+ * (These are currently the same as VBUS ops, but will be different
+ * in the future once full PMMODE support is implemented.)
+ */
+static const struct regulator_ops bcm590xx_ops_ldo_novolt = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+};
+
+static const struct regulator_ops bcm590xx_ops_dcdc = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+};
+
+static const struct regulator_ops bcm590xx_ops_vbus = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+};
+
+#define BCM590XX_REG_DESC(_model, _name, _name_lower) \
+ .id = _model##_REG_##_name, \
+ .name = #_name_lower, \
+ .of_match = of_match_ptr(#_name_lower), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE \
+
+#define BCM590XX_LDO_DESC(_model, _model_lower, _name, _name_lower, _table) \
+ BCM590XX_REG_DESC(_model, _name, _name_lower), \
+ .ops = &bcm590xx_ops_ldo, \
+ .n_voltages = ARRAY_SIZE(_model_lower##_##_table), \
+ .volt_table = _model_lower##_##_table, \
+ .vsel_reg = _model##_##_name##CTRL, \
+ .vsel_mask = BCM590XX_LDO_VSEL_MASK, \
+ .enable_reg = _model##_##_name##PMCTRL1, \
+ .enable_mask = BCM590XX_REG_ENABLE, \
+ .enable_is_inverted = true
+
+#define BCM590XX_SR_DESC(_model, _model_lower, _name, _name_lower, _ranges) \
+ BCM590XX_REG_DESC(_model, _name, _name_lower), \
+ .ops = &bcm590xx_ops_dcdc, \
+ .n_voltages = 64, \
+ .linear_ranges = _model_lower##_##_ranges, \
+ .n_linear_ranges = ARRAY_SIZE(_model_lower##_##_ranges), \
+ .vsel_reg = _model##_##_name##VOUT1, \
+ .vsel_mask = BCM590XX_SR_VSEL_MASK, \
+ .enable_reg = _model##_##_name##PMCTRL1, \
+ .enable_mask = BCM590XX_REG_ENABLE, \
+ .enable_is_inverted = true
+
+#define BCM59056_REG_DESC(_name, _name_lower) \
+ BCM590XX_REG_DESC(BCM59056, _name, _name_lower)
+#define BCM59056_LDO_DESC(_name, _name_lower, _table) \
+ BCM590XX_LDO_DESC(BCM59056, bcm59056, _name, _name_lower, _table)
+#define BCM59056_SR_DESC(_name, _name_lower, _ranges) \
+ BCM590XX_SR_DESC(BCM59056, bcm59056, _name, _name_lower, _ranges)
+
+#define BCM59054_REG_DESC(_name, _name_lower) \
+ BCM590XX_REG_DESC(BCM59054, _name, _name_lower)
+#define BCM59054_LDO_DESC(_name, _name_lower, _table) \
+ BCM590XX_LDO_DESC(BCM59054, bcm59054, _name, _name_lower, _table)
+#define BCM59054_SR_DESC(_name, _name_lower, _ranges) \
+ BCM590XX_SR_DESC(BCM59054, bcm59054, _name, _name_lower, _ranges)
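Spelling out one expansion makes the token pasting easier to follow; for instance BCM59056_LDO_DESC(RFLDO, rfldo, ldo_a_table) produces:

.id = BCM59056_REG_RFLDO,
.name = "rfldo",
.of_match = of_match_ptr("rfldo"),
.regulators_node = of_match_ptr("regulators"),
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
.ops = &bcm590xx_ops_ldo,
.n_voltages = ARRAY_SIZE(bcm59056_ldo_a_table),
.volt_table = bcm59056_ldo_a_table,
.vsel_reg = BCM59056_RFLDOCTRL,
.vsel_mask = BCM590XX_LDO_VSEL_MASK,
.enable_reg = BCM59056_RFLDOPMCTRL1,
.enable_mask = BCM590XX_REG_ENABLE,
.enable_is_inverted = true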
+
+/* BCM59056 data */
+
+/* I2C slave 0 registers */
+#define BCM59056_RFLDOPMCTRL1 0x60
+#define BCM59056_CAMLDO1PMCTRL1 0x62
+#define BCM59056_CAMLDO2PMCTRL1 0x64
+#define BCM59056_SIMLDO1PMCTRL1 0x66
+#define BCM59056_SIMLDO2PMCTRL1 0x68
+#define BCM59056_SDLDOPMCTRL1 0x6a
+#define BCM59056_SDXLDOPMCTRL1 0x6c
+#define BCM59056_MMCLDO1PMCTRL1 0x6e
+#define BCM59056_MMCLDO2PMCTRL1 0x70
+#define BCM59056_AUDLDOPMCTRL1 0x72
+#define BCM59056_MICLDOPMCTRL1 0x74
+#define BCM59056_USBLDOPMCTRL1 0x76
+#define BCM59056_VIBLDOPMCTRL1 0x78
+#define BCM59056_IOSR1PMCTRL1 0x7a
+#define BCM59056_IOSR2PMCTRL1 0x7c
+#define BCM59056_CSRPMCTRL1 0x7e
+#define BCM59056_SDSR1PMCTRL1 0x82
+#define BCM59056_SDSR2PMCTRL1 0x86
+#define BCM59056_MSRPMCTRL1 0x8a
+#define BCM59056_VSRPMCTRL1 0x8e
+#define BCM59056_RFLDOCTRL 0x96
+#define BCM59056_CAMLDO1CTRL 0x97
+#define BCM59056_CAMLDO2CTRL 0x98
+#define BCM59056_SIMLDO1CTRL 0x99
+#define BCM59056_SIMLDO2CTRL 0x9a
+#define BCM59056_SDLDOCTRL 0x9b
+#define BCM59056_SDXLDOCTRL 0x9c
+#define BCM59056_MMCLDO1CTRL 0x9d
+#define BCM59056_MMCLDO2CTRL 0x9e
+#define BCM59056_AUDLDOCTRL 0x9f
+#define BCM59056_MICLDOCTRL 0xa0
+#define BCM59056_USBLDOCTRL 0xa1
+#define BCM59056_VIBLDOCTRL 0xa2
+#define BCM59056_CSRVOUT1 0xc0
+#define BCM59056_IOSR1VOUT1 0xc3
+#define BCM59056_IOSR2VOUT1 0xc6
+#define BCM59056_MSRVOUT1 0xc9
+#define BCM59056_SDSR1VOUT1 0xcc
+#define BCM59056_SDSR2VOUT1 0xcf
+#define BCM59056_VSRVOUT1 0xd2
+
+/* I2C slave 1 registers */
+#define BCM59056_GPLDO5PMCTRL1 0x16
+#define BCM59056_GPLDO6PMCTRL1 0x18
+#define BCM59056_GPLDO1CTRL 0x1a
+#define BCM59056_GPLDO2CTRL 0x1b
+#define BCM59056_GPLDO3CTRL 0x1c
+#define BCM59056_GPLDO4CTRL 0x1d
+#define BCM59056_GPLDO5CTRL 0x1e
+#define BCM59056_GPLDO6CTRL 0x1f
+#define BCM59056_OTG_CTRL 0x40
+#define BCM59056_GPLDO1PMCTRL1 0x57
+#define BCM59056_GPLDO2PMCTRL1 0x59
+#define BCM59056_GPLDO3PMCTRL1 0x5b
+#define BCM59056_GPLDO4PMCTRL1 0x5d
+
/*
* RFLDO to VSR regulators are
* accessed via I2C slave 0
*/
/* LDO regulator IDs */
-#define BCM590XX_REG_RFLDO 0
-#define BCM590XX_REG_CAMLDO1 1
-#define BCM590XX_REG_CAMLDO2 2
-#define BCM590XX_REG_SIMLDO1 3
-#define BCM590XX_REG_SIMLDO2 4
-#define BCM590XX_REG_SDLDO 5
-#define BCM590XX_REG_SDXLDO 6
-#define BCM590XX_REG_MMCLDO1 7
-#define BCM590XX_REG_MMCLDO2 8
-#define BCM590XX_REG_AUDLDO 9
-#define BCM590XX_REG_MICLDO 10
-#define BCM590XX_REG_USBLDO 11
-#define BCM590XX_REG_VIBLDO 12
+#define BCM59056_REG_RFLDO 0
+#define BCM59056_REG_CAMLDO1 1
+#define BCM59056_REG_CAMLDO2 2
+#define BCM59056_REG_SIMLDO1 3
+#define BCM59056_REG_SIMLDO2 4
+#define BCM59056_REG_SDLDO 5
+#define BCM59056_REG_SDXLDO 6
+#define BCM59056_REG_MMCLDO1 7
+#define BCM59056_REG_MMCLDO2 8
+#define BCM59056_REG_AUDLDO 9
+#define BCM59056_REG_MICLDO 10
+#define BCM59056_REG_USBLDO 11
+#define BCM59056_REG_VIBLDO 12
/* DCDC regulator IDs */
-#define BCM590XX_REG_CSR 13
-#define BCM590XX_REG_IOSR1 14
-#define BCM590XX_REG_IOSR2 15
-#define BCM590XX_REG_MSR 16
-#define BCM590XX_REG_SDSR1 17
-#define BCM590XX_REG_SDSR2 18
-#define BCM590XX_REG_VSR 19
+#define BCM59056_REG_CSR 13
+#define BCM59056_REG_IOSR1 14
+#define BCM59056_REG_IOSR2 15
+#define BCM59056_REG_MSR 16
+#define BCM59056_REG_SDSR1 17
+#define BCM59056_REG_SDSR2 18
+#define BCM59056_REG_VSR 19
/*
* GPLDO1 to VBUS regulators are
* accessed via I2C slave 1
*/
-#define BCM590XX_REG_GPLDO1 20
-#define BCM590XX_REG_GPLDO2 21
-#define BCM590XX_REG_GPLDO3 22
-#define BCM590XX_REG_GPLDO4 23
-#define BCM590XX_REG_GPLDO5 24
-#define BCM590XX_REG_GPLDO6 25
-#define BCM590XX_REG_VBUS 26
+#define BCM59056_REG_GPLDO1 20
+#define BCM59056_REG_GPLDO2 21
+#define BCM59056_REG_GPLDO3 22
+#define BCM59056_REG_GPLDO4 23
+#define BCM59056_REG_GPLDO5 24
+#define BCM59056_REG_GPLDO6 25
+#define BCM59056_REG_VBUS 26
-#define BCM590XX_NUM_REGS 27
-
-#define BCM590XX_REG_IS_LDO(n) (n < BCM590XX_REG_CSR)
-#define BCM590XX_REG_IS_GPLDO(n) \
- ((n > BCM590XX_REG_VSR) && (n < BCM590XX_REG_VBUS))
-#define BCM590XX_REG_IS_VBUS(n) (n == BCM590XX_REG_VBUS)
+#define BCM59056_NUM_REGS 27
/* LDO group A: supported voltages in microvolts */
-static const unsigned int ldo_a_table[] = {
+static const unsigned int bcm59056_ldo_a_table[] = {
1200000, 1800000, 2500000, 2700000, 2800000,
2900000, 3000000, 3300000,
};
/* LDO group C: supported voltages in microvolts */
-static const unsigned int ldo_c_table[] = {
+static const unsigned int bcm59056_ldo_c_table[] = {
3100000, 1800000, 2500000, 2700000, 2800000,
2900000, 3000000, 3300000,
};
-static const unsigned int ldo_vbus[] = {
- 5000000,
-};
-
/* DCDC group CSR: supported voltages in microvolts */
-static const struct linear_range dcdc_csr_ranges[] = {
+static const struct linear_range bcm59056_dcdc_csr_ranges[] = {
REGULATOR_LINEAR_RANGE(860000, 2, 50, 10000),
REGULATOR_LINEAR_RANGE(1360000, 51, 55, 20000),
REGULATOR_LINEAR_RANGE(900000, 56, 63, 0),
};
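As a worked decode of the CSR ranges: a selector of 10 falls in the first range, so the voltage is 860000 + (10 - 2) * 10000 = 940000 uV, i.e. 0.94 V:

/* regulator_list_voltage_linear_range(), selector 10, first range */
volt_uV = 860000 + (10 - 2) * 10000;	/* 940000 uV */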
/* DCDC group IOSR1: supported voltages in microvolts */
-static const struct linear_range dcdc_iosr1_ranges[] = {
+static const struct linear_range bcm59056_dcdc_iosr1_ranges[] = {
REGULATOR_LINEAR_RANGE(860000, 2, 51, 10000),
REGULATOR_LINEAR_RANGE(1500000, 52, 52, 0),
REGULATOR_LINEAR_RANGE(1800000, 53, 53, 0),
@@ -131,155 +255,854 @@ static const struct linear_range dcdc_iosr1_ranges[] = {
};
/* DCDC group SDSR1: supported voltages in microvolts */
-static const struct linear_range dcdc_sdsr1_ranges[] = {
+static const struct linear_range bcm59056_dcdc_sdsr1_ranges[] = {
REGULATOR_LINEAR_RANGE(860000, 2, 50, 10000),
REGULATOR_LINEAR_RANGE(1340000, 51, 51, 0),
REGULATOR_LINEAR_RANGE(900000, 52, 63, 0),
};
-struct bcm590xx_info {
- const char *name;
- const char *vin_name;
- u8 n_voltages;
- const unsigned int *volt_table;
- u8 n_linear_ranges;
- const struct linear_range *linear_ranges;
-};
+static const struct bcm590xx_reg_data bcm59056_regs[BCM59056_NUM_REGS] = {
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(RFLDO, rfldo, ldo_a_table),
+ },
+ },
-#define BCM590XX_REG_TABLE(_name, _table) \
- { \
- .name = #_name, \
- .n_voltages = ARRAY_SIZE(_table), \
- .volt_table = _table, \
- }
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(CAMLDO1, camldo1, ldo_c_table),
+ },
+ },
-#define BCM590XX_REG_RANGES(_name, _ranges) \
- { \
- .name = #_name, \
- .n_voltages = 64, \
- .n_linear_ranges = ARRAY_SIZE(_ranges), \
- .linear_ranges = _ranges, \
- }
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(CAMLDO2, camldo2, ldo_c_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(SIMLDO1, simldo1, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(SIMLDO2, simldo2, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(SDLDO, sdldo, ldo_c_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(SDXLDO, sdxldo, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(MMCLDO1, mmcldo1, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(MMCLDO2, mmcldo2, ldo_a_table),
+ },
+ },
-static struct bcm590xx_info bcm590xx_regs[] = {
- BCM590XX_REG_TABLE(rfldo, ldo_a_table),
- BCM590XX_REG_TABLE(camldo1, ldo_c_table),
- BCM590XX_REG_TABLE(camldo2, ldo_c_table),
- BCM590XX_REG_TABLE(simldo1, ldo_a_table),
- BCM590XX_REG_TABLE(simldo2, ldo_a_table),
- BCM590XX_REG_TABLE(sdldo, ldo_c_table),
- BCM590XX_REG_TABLE(sdxldo, ldo_a_table),
- BCM590XX_REG_TABLE(mmcldo1, ldo_a_table),
- BCM590XX_REG_TABLE(mmcldo2, ldo_a_table),
- BCM590XX_REG_TABLE(audldo, ldo_a_table),
- BCM590XX_REG_TABLE(micldo, ldo_a_table),
- BCM590XX_REG_TABLE(usbldo, ldo_a_table),
- BCM590XX_REG_TABLE(vibldo, ldo_c_table),
- BCM590XX_REG_RANGES(csr, dcdc_csr_ranges),
- BCM590XX_REG_RANGES(iosr1, dcdc_iosr1_ranges),
- BCM590XX_REG_RANGES(iosr2, dcdc_iosr1_ranges),
- BCM590XX_REG_RANGES(msr, dcdc_iosr1_ranges),
- BCM590XX_REG_RANGES(sdsr1, dcdc_sdsr1_ranges),
- BCM590XX_REG_RANGES(sdsr2, dcdc_iosr1_ranges),
- BCM590XX_REG_RANGES(vsr, dcdc_iosr1_ranges),
- BCM590XX_REG_TABLE(gpldo1, ldo_a_table),
- BCM590XX_REG_TABLE(gpldo2, ldo_a_table),
- BCM590XX_REG_TABLE(gpldo3, ldo_a_table),
- BCM590XX_REG_TABLE(gpldo4, ldo_a_table),
- BCM590XX_REG_TABLE(gpldo5, ldo_a_table),
- BCM590XX_REG_TABLE(gpldo6, ldo_a_table),
- BCM590XX_REG_TABLE(vbus, ldo_vbus),
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(AUDLDO, audldo, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(MICLDO, micldo, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(USBLDO, usbldo, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(VIBLDO, vibldo, ldo_c_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_SR_DESC(CSR, csr, dcdc_csr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_SR_DESC(IOSR1, iosr1, dcdc_iosr1_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_SR_DESC(IOSR2, iosr2, dcdc_iosr1_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_SR_DESC(MSR, msr, dcdc_iosr1_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_SR_DESC(SDSR1, sdsr1, dcdc_sdsr1_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_SR_DESC(SDSR2, sdsr2, dcdc_iosr1_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_SR_DESC(VSR, vsr, dcdc_iosr1_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59056_LDO_DESC(GPLDO1, gpldo1, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59056_LDO_DESC(GPLDO2, gpldo2, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59056_LDO_DESC(GPLDO3, gpldo3, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59056_LDO_DESC(GPLDO4, gpldo4, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59056_LDO_DESC(GPLDO5, gpldo5, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59056_LDO_DESC(GPLDO6, gpldo6, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_VBUS,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59056_REG_DESC(VBUS, vbus),
+ .ops = &bcm590xx_ops_vbus,
+ .n_voltages = 1,
+ .fixed_uV = 5000000,
+ .enable_reg = BCM59056_OTG_CTRL,
+ .enable_mask = BCM590XX_VBUS_ENABLE,
+ },
+ },
};
-struct bcm590xx_reg {
- struct regulator_desc *desc;
- struct bcm590xx *mfd;
+/* BCM59054 data */
+
+/* I2C slave 0 registers */
+#define BCM59054_RFLDOPMCTRL1 0x60
+#define BCM59054_CAMLDO1PMCTRL1 0x62
+#define BCM59054_CAMLDO2PMCTRL1 0x64
+#define BCM59054_SIMLDO1PMCTRL1 0x66
+#define BCM59054_SIMLDO2PMCTRL1 0x68
+#define BCM59054_SDLDOPMCTRL1 0x6a
+#define BCM59054_SDXLDOPMCTRL1 0x6c
+#define BCM59054_MMCLDO1PMCTRL1 0x6e
+#define BCM59054_MMCLDO2PMCTRL1 0x70
+#define BCM59054_AUDLDOPMCTRL1 0x72
+#define BCM59054_MICLDOPMCTRL1 0x74
+#define BCM59054_USBLDOPMCTRL1 0x76
+#define BCM59054_VIBLDOPMCTRL1 0x78
+#define BCM59054_IOSR1PMCTRL1 0x7a
+#define BCM59054_IOSR2PMCTRL1 0x7c
+#define BCM59054_CSRPMCTRL1 0x7e
+#define BCM59054_SDSR1PMCTRL1 0x82
+#define BCM59054_SDSR2PMCTRL1 0x86
+#define BCM59054_MMSRPMCTRL1 0x8a
+#define BCM59054_VSRPMCTRL1 0x8e
+#define BCM59054_RFLDOCTRL 0x96
+#define BCM59054_CAMLDO1CTRL 0x97
+#define BCM59054_CAMLDO2CTRL 0x98
+#define BCM59054_SIMLDO1CTRL 0x99
+#define BCM59054_SIMLDO2CTRL 0x9a
+#define BCM59054_SDLDOCTRL 0x9b
+#define BCM59054_SDXLDOCTRL 0x9c
+#define BCM59054_MMCLDO1CTRL 0x9d
+#define BCM59054_MMCLDO2CTRL 0x9e
+#define BCM59054_AUDLDOCTRL 0x9f
+#define BCM59054_MICLDOCTRL 0xa0
+#define BCM59054_USBLDOCTRL 0xa1
+#define BCM59054_VIBLDOCTRL 0xa2
+#define BCM59054_CSRVOUT1 0xc0
+#define BCM59054_IOSR1VOUT1 0xc3
+#define BCM59054_IOSR2VOUT1 0xc6
+#define BCM59054_MMSRVOUT1 0xc9
+#define BCM59054_SDSR1VOUT1 0xcc
+#define BCM59054_SDSR2VOUT1 0xcf
+#define BCM59054_VSRVOUT1 0xd2
+
+/* I2C slave 1 registers */
+#define BCM59054_LVLDO1PMCTRL1 0x16
+#define BCM59054_LVLDO2PMCTRL1 0x18
+#define BCM59054_GPLDO1CTRL 0x1a
+#define BCM59054_GPLDO2CTRL 0x1b
+#define BCM59054_GPLDO3CTRL 0x1c
+#define BCM59054_TCXLDOCTRL 0x1d
+#define BCM59054_LVLDO1CTRL 0x1e
+#define BCM59054_LVLDO2CTRL 0x1f
+#define BCM59054_OTG_CTRL 0x40
+#define BCM59054_GPLDO1PMCTRL1 0x57
+#define BCM59054_GPLDO2PMCTRL1 0x59
+#define BCM59054_GPLDO3PMCTRL1 0x5b
+#define BCM59054_TCXLDOPMCTRL1 0x5d
+
+/*
+ * RFLDO to VSR regulators are
+ * accessed via I2C slave 0
+ */
+
+/* LDO regulator IDs */
+#define BCM59054_REG_RFLDO 0
+#define BCM59054_REG_CAMLDO1 1
+#define BCM59054_REG_CAMLDO2 2
+#define BCM59054_REG_SIMLDO1 3
+#define BCM59054_REG_SIMLDO2 4
+#define BCM59054_REG_SDLDO 5
+#define BCM59054_REG_SDXLDO 6
+#define BCM59054_REG_MMCLDO1 7
+#define BCM59054_REG_MMCLDO2 8
+#define BCM59054_REG_AUDLDO 9
+#define BCM59054_REG_MICLDO 10
+#define BCM59054_REG_USBLDO 11
+#define BCM59054_REG_VIBLDO 12
+
+/* DCDC regulator IDs */
+#define BCM59054_REG_CSR 13
+#define BCM59054_REG_IOSR1 14
+#define BCM59054_REG_IOSR2 15
+#define BCM59054_REG_MMSR 16
+#define BCM59054_REG_SDSR1 17
+#define BCM59054_REG_SDSR2 18
+#define BCM59054_REG_VSR 19
+
+/*
+ * GPLDO1 to VBUS regulators are
+ * accessed via I2C slave 1
+ */
+
+#define BCM59054_REG_GPLDO1 20
+#define BCM59054_REG_GPLDO2 21
+#define BCM59054_REG_GPLDO3 22
+#define BCM59054_REG_TCXLDO 23
+#define BCM59054_REG_LVLDO1 24
+#define BCM59054_REG_LVLDO2 25
+#define BCM59054_REG_VBUS 26
+
+#define BCM59054_NUM_REGS 27
+
+/* LDO group 1: supported voltages in microvolts */
+static const unsigned int bcm59054_ldo_1_table[] = {
+ 1200000, 1800000, 2500000, 2700000, 2800000,
+ 2900000, 3000000, 3300000,
};
-static int bcm590xx_get_vsel_register(int id)
-{
- if (BCM590XX_REG_IS_LDO(id))
- return BCM590XX_RFLDOCTRL + id;
- else if (BCM590XX_REG_IS_GPLDO(id))
- return BCM590XX_GPLDO1CTRL + id;
- else
- return BCM590XX_CSRVOUT1 + (id - BCM590XX_REG_CSR) * 3;
-}
+/* LDO group 2: supported voltages in microvolts */
+static const unsigned int bcm59054_ldo_2_table[] = {
+ 3100000, 1800000, 2500000, 2700000, 2800000,
+ 2900000, 3000000, 3300000,
+};
-static int bcm590xx_get_enable_register(int id)
-{
- int reg = 0;
-
- if (BCM590XX_REG_IS_LDO(id))
- reg = BCM590XX_RFLDOPMCTRL1 + id * 2;
- else if (BCM590XX_REG_IS_GPLDO(id))
- reg = BCM590XX_GPLDO1PMCTRL1 + id * 2;
- else
- switch (id) {
- case BCM590XX_REG_CSR:
- reg = BCM590XX_CSRPMCTRL1;
- break;
- case BCM590XX_REG_IOSR1:
- reg = BCM590XX_IOSR1PMCTRL1;
- break;
- case BCM590XX_REG_IOSR2:
- reg = BCM590XX_IOSR2PMCTRL1;
- break;
- case BCM590XX_REG_MSR:
- reg = BCM590XX_MSRPMCTRL1;
- break;
- case BCM590XX_REG_SDSR1:
- reg = BCM590XX_SDSR1PMCTRL1;
- break;
- case BCM590XX_REG_SDSR2:
- reg = BCM590XX_SDSR2PMCTRL1;
- break;
- case BCM590XX_REG_VSR:
- reg = BCM590XX_VSRPMCTRL1;
- break;
- case BCM590XX_REG_VBUS:
- reg = BCM590XX_OTG_CTRL;
- break;
- }
+/* LDO group 3: supported voltages in microvolts */
+static const unsigned int bcm59054_ldo_3_table[] = {
+ 1000000, 1107000, 1143000, 1214000, 1250000,
+ 1464000, 1500000, 1786000,
+};
+/* DCDC group SR: supported voltages in microvolts */
+static const struct linear_range bcm59054_dcdc_sr_ranges[] = {
+ REGULATOR_LINEAR_RANGE(0, 0, 1, 0),
+ REGULATOR_LINEAR_RANGE(860000, 2, 60, 10000),
+ REGULATOR_LINEAR_RANGE(1500000, 61, 61, 0),
+ REGULATOR_LINEAR_RANGE(1800000, 62, 62, 0),
+ REGULATOR_LINEAR_RANGE(900000, 63, 63, 0),
+};
- return reg;
-}
+/* DCDC group VSR (BCM59054A1): supported voltages in microvolts */
+static const struct linear_range bcm59054_dcdc_vsr_a1_ranges[] = {
+ REGULATOR_LINEAR_RANGE(0, 0, 1, 0),
+ REGULATOR_LINEAR_RANGE(860000, 2, 59, 10000),
+ REGULATOR_LINEAR_RANGE(1700000, 60, 60, 0),
+ REGULATOR_LINEAR_RANGE(1500000, 61, 61, 0),
+ REGULATOR_LINEAR_RANGE(1800000, 62, 62, 0),
+ REGULATOR_LINEAR_RANGE(1600000, 63, 63, 0),
+};
-static const struct regulator_ops bcm590xx_ops_ldo = {
- .is_enabled = regulator_is_enabled_regmap,
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage_sel = regulator_set_voltage_sel_regmap,
- .list_voltage = regulator_list_voltage_table,
- .map_voltage = regulator_map_voltage_iterate,
+/* DCDC group CSR: supported voltages in microvolts */
+static const struct linear_range bcm59054_dcdc_csr_ranges[] = {
+ REGULATOR_LINEAR_RANGE(700000, 0, 1, 100000),
+ REGULATOR_LINEAR_RANGE(860000, 2, 60, 10000),
+ REGULATOR_LINEAR_RANGE(900000, 61, 63, 0),
};
-static const struct regulator_ops bcm590xx_ops_dcdc = {
- .is_enabled = regulator_is_enabled_regmap,
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage_sel = regulator_set_voltage_sel_regmap,
- .list_voltage = regulator_list_voltage_linear_range,
- .map_voltage = regulator_map_voltage_linear_range,
+static const struct bcm590xx_reg_data bcm59054_regs[BCM59054_NUM_REGS] = {
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(RFLDO, rfldo, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(CAMLDO1, camldo1, ldo_2_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(CAMLDO2, camldo2, ldo_2_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(SIMLDO1, simldo1, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(SIMLDO2, simldo2, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(SDLDO, sdldo, ldo_2_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(SDXLDO, sdxldo, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(MMCLDO1, mmcldo1, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(MMCLDO2, mmcldo2, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(AUDLDO, audldo, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_REG_DESC(MICLDO, micldo),
+ .ops = &bcm590xx_ops_ldo_novolt,
+ /* MICLDO is locked at 1.8V */
+ .n_voltages = 1,
+ .fixed_uV = 1800000,
+ .enable_reg = BCM59054_MICLDOPMCTRL1,
+ .enable_mask = BCM590XX_REG_ENABLE,
+ .enable_is_inverted = true,
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(USBLDO, usbldo, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(VIBLDO, vibldo, ldo_2_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(CSR, csr, dcdc_csr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(IOSR1, iosr1, dcdc_sr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(IOSR2, iosr2, dcdc_sr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(MMSR, mmsr, dcdc_sr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(SDSR1, sdsr1, dcdc_sr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(SDSR2, sdsr2, dcdc_sr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(VSR, vsr, dcdc_sr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_LDO_DESC(GPLDO1, gpldo1, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_LDO_DESC(GPLDO2, gpldo2, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_LDO_DESC(GPLDO3, gpldo3, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_LDO_DESC(TCXLDO, tcxldo, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_LDO_DESC(LVLDO1, lvldo1, ldo_3_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_LDO_DESC(LVLDO2, lvldo2, ldo_3_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_VBUS,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_REG_DESC(VBUS, vbus),
+ .ops = &bcm590xx_ops_vbus,
+ .n_voltages = 1,
+ .fixed_uV = 5000000,
+ .enable_reg = BCM59054_OTG_CTRL,
+ .enable_mask = BCM590XX_VBUS_ENABLE,
+ },
+ },
};
-static const struct regulator_ops bcm590xx_ops_vbus = {
- .is_enabled = regulator_is_enabled_regmap,
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
+/*
+ * BCM59054A1 regulators; the same as the previous revision, but with a
+ * different VSR voltage table.
+ */
+static const struct bcm590xx_reg_data bcm59054_a1_regs[BCM59054_NUM_REGS] = {
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(RFLDO, rfldo, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(CAMLDO1, camldo1, ldo_2_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(CAMLDO2, camldo2, ldo_2_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(SIMLDO1, simldo1, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(SIMLDO2, simldo2, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(SDLDO, sdldo, ldo_2_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(SDXLDO, sdxldo, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(MMCLDO1, mmcldo1, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(MMCLDO2, mmcldo2, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(AUDLDO, audldo, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_REG_DESC(MICLDO, micldo),
+ .ops = &bcm590xx_ops_ldo_novolt,
+ /* MICLDO is locked at 1.8V */
+ .n_voltages = 1,
+ .fixed_uV = 1800000,
+ .enable_reg = BCM59054_MICLDOPMCTRL1,
+ .enable_mask = BCM590XX_REG_ENABLE,
+ .enable_is_inverted = true,
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(USBLDO, usbldo, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(VIBLDO, vibldo, ldo_2_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(CSR, csr, dcdc_csr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(IOSR1, iosr1, dcdc_sr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(IOSR2, iosr2, dcdc_sr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(MMSR, mmsr, dcdc_sr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(SDSR1, sdsr1, dcdc_sr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(SDSR2, sdsr2, dcdc_sr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(VSR, vsr, dcdc_vsr_a1_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_LDO_DESC(GPLDO1, gpldo1, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_LDO_DESC(GPLDO2, gpldo2, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_LDO_DESC(GPLDO3, gpldo3, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_LDO_DESC(TCXLDO, tcxldo, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_LDO_DESC(LVLDO1, lvldo1, ldo_3_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_LDO_DESC(LVLDO2, lvldo2, ldo_3_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_VBUS,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_REG_DESC(VBUS, vbus),
+ .ops = &bcm590xx_ops_vbus,
+ .n_voltages = 1,
+ .fixed_uV = 5000000,
+ .enable_reg = BCM59054_OTG_CTRL,
+ .enable_mask = BCM590XX_VBUS_ENABLE,
+ },
+ },
};
static int bcm590xx_probe(struct platform_device *pdev)
{
struct bcm590xx *bcm590xx = dev_get_drvdata(pdev->dev.parent);
struct bcm590xx_reg *pmu;
+ const struct bcm590xx_reg_data *info;
struct regulator_config config = { };
- struct bcm590xx_info *info;
struct regulator_dev *rdev;
- int i;
+ unsigned int i;
pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
if (!pmu)
@@ -287,65 +1110,53 @@ static int bcm590xx_probe(struct platform_device *pdev)
pmu->mfd = bcm590xx;
- platform_set_drvdata(pdev, pmu);
-
- pmu->desc = devm_kcalloc(&pdev->dev,
- BCM590XX_NUM_REGS,
- sizeof(struct regulator_desc),
- GFP_KERNEL);
- if (!pmu->desc)
- return -ENOMEM;
+ switch (pmu->mfd->pmu_id) {
+ case BCM590XX_PMUID_BCM59054:
+ pmu->n_regulators = BCM59054_NUM_REGS;
+ if (pmu->mfd->rev_analog == BCM59054_REV_ANALOG_A1)
+ pmu->regs = bcm59054_a1_regs;
+ else
+ pmu->regs = bcm59054_regs;
+ break;
+ case BCM590XX_PMUID_BCM59056:
+ pmu->n_regulators = BCM59056_NUM_REGS;
+ pmu->regs = bcm59056_regs;
+ break;
+ default:
+ dev_err(bcm590xx->dev,
+ "unknown device type, could not initialize\n");
+ return -EINVAL;
+ }
- info = bcm590xx_regs;
-
- for (i = 0; i < BCM590XX_NUM_REGS; i++, info++) {
- /* Register the regulators */
- pmu->desc[i].name = info->name;
- pmu->desc[i].of_match = of_match_ptr(info->name);
- pmu->desc[i].regulators_node = of_match_ptr("regulators");
- pmu->desc[i].supply_name = info->vin_name;
- pmu->desc[i].id = i;
- pmu->desc[i].volt_table = info->volt_table;
- pmu->desc[i].n_voltages = info->n_voltages;
- pmu->desc[i].linear_ranges = info->linear_ranges;
- pmu->desc[i].n_linear_ranges = info->n_linear_ranges;
-
- if ((BCM590XX_REG_IS_LDO(i)) || (BCM590XX_REG_IS_GPLDO(i))) {
- pmu->desc[i].ops = &bcm590xx_ops_ldo;
- pmu->desc[i].vsel_mask = BCM590XX_LDO_VSEL_MASK;
- } else if (BCM590XX_REG_IS_VBUS(i))
- pmu->desc[i].ops = &bcm590xx_ops_vbus;
- else {
- pmu->desc[i].ops = &bcm590xx_ops_dcdc;
- pmu->desc[i].vsel_mask = BCM590XX_SR_VSEL_MASK;
- }
+ platform_set_drvdata(pdev, pmu);
- if (BCM590XX_REG_IS_VBUS(i))
- pmu->desc[i].enable_mask = BCM590XX_VBUS_ENABLE;
- else {
- pmu->desc[i].vsel_reg = bcm590xx_get_vsel_register(i);
- pmu->desc[i].enable_is_inverted = true;
- pmu->desc[i].enable_mask = BCM590XX_REG_ENABLE;
- }
- pmu->desc[i].enable_reg = bcm590xx_get_enable_register(i);
- pmu->desc[i].type = REGULATOR_VOLTAGE;
- pmu->desc[i].owner = THIS_MODULE;
+ /* Register the regulators */
+ for (i = 0; i < pmu->n_regulators; i++) {
+ info = &pmu->regs[i];
config.dev = bcm590xx->dev;
config.driver_data = pmu;
- if (BCM590XX_REG_IS_GPLDO(i) || BCM590XX_REG_IS_VBUS(i))
- config.regmap = bcm590xx->regmap_sec;
- else
- config.regmap = bcm590xx->regmap_pri;
- rdev = devm_regulator_register(&pdev->dev, &pmu->desc[i],
- &config);
- if (IS_ERR(rdev)) {
+ switch (info->regmap) {
+ case BCM590XX_REGMAP_PRI:
+ config.regmap = bcm590xx->regmap_pri;
+ break;
+ case BCM590XX_REGMAP_SEC:
+ config.regmap = bcm590xx->regmap_sec;
+ break;
+ default:
dev_err(bcm590xx->dev,
- "failed to register %s regulator\n",
+ "invalid regmap for %s regulator; this is a driver bug\n",
pdev->name);
- return PTR_ERR(rdev);
+ return -EINVAL;
}
+
+ rdev = devm_regulator_register(&pdev->dev, &info->desc,
+ &config);
+ if (IS_ERR(rdev))
+ return dev_err_probe(bcm590xx->dev, PTR_ERR(rdev),
+ "failed to register %s regulator\n",
+ pdev->name);
}
return 0;
diff --git a/drivers/regulator/bd96801-regulator.c b/drivers/regulator/bd96801-regulator.c
index 3a9d772491a8..24d21172298b 100644
--- a/drivers/regulator/bd96801-regulator.c
+++ b/drivers/regulator/bd96801-regulator.c
@@ -83,6 +83,7 @@ enum {
#define BD96801_LDO6_VSEL_REG 0x26
#define BD96801_LDO7_VSEL_REG 0x27
#define BD96801_BUCK_VSEL_MASK 0x1F
+#define BD96805_BUCK_VSEL_MASK 0x3f
#define BD96801_LDO_VSEL_MASK 0xff
#define BD96801_MASK_RAMP_DELAY 0xc0
@@ -90,6 +91,7 @@ enum {
#define BD96801_BUCK_INT_VOUT_MASK 0xff
#define BD96801_BUCK_VOLTS 256
+#define BD96805_BUCK_VOLTS 64
#define BD96801_LDO_VOLTS 256
#define BD96801_OVP_MASK 0x03
@@ -160,6 +162,30 @@ static const struct linear_range bd96801_buck_init_volts[] = {
REGULATOR_LINEAR_RANGE(3300000 - 150000, 0xed, 0xff, 0),
};
+/* BD96802 uses the same buck voltage ranges as BD96801 */
+#define bd96802_tune_volts bd96801_tune_volts
+#define bd96802_buck_init_volts bd96801_buck_init_volts
+
+/*
+ * On the BD96805 we have a similar "negative tuning range" as on the
+ * BD96801, except that the max tuning is -310 ... +310 mV (instead of
+ * 150 mV). We use the same approach as with the BD96801 ranges.
+ */
+static const struct linear_range bd96805_tune_volts[] = {
+ REGULATOR_LINEAR_RANGE(310000, 0x00, 0x1F, 10000),
+ REGULATOR_LINEAR_RANGE(0, 0x20, 0x3F, 10000),
+};
+
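+/*
+ * As with BD96801, the initial-voltage ranges below are stored offset by
+ * the 310 mV tuning headroom (hence the "- 310000" terms).
+ */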
+static const struct linear_range bd96805_buck_init_volts[] = {
+ REGULATOR_LINEAR_RANGE(500000 - 310000, 0x00, 0xc8, 5000),
+ REGULATOR_LINEAR_RANGE(1550000 - 310000, 0xc9, 0xec, 50000),
+ REGULATOR_LINEAR_RANGE(3300000 - 310000, 0xed, 0xff, 0),
+};
+
+/* BD96806 uses the same buck voltage ranges as BD96805 */
+#define bd96806_tune_volts bd96805_tune_volts
+#define bd96806_buck_init_volts bd96805_buck_init_volts
+
static const struct linear_range bd96801_ldo_int_volts[] = {
REGULATOR_LINEAR_RANGE(300000, 0x00, 0x78, 25000),
REGULATOR_LINEAR_RANGE(3300000, 0x79, 0xff, 0),
@@ -198,89 +224,89 @@ struct bd96801_irqinfo {
static const struct bd96801_irqinfo buck1_irqinfo[] = {
BD96801_IRQINFO(BD96801_PROT_OCP, "buck1-over-curr-h", 500,
- "bd96801-buck1-overcurr-h"),
+ "buck1-overcurr-h"),
BD96801_IRQINFO(BD96801_PROT_OCP, "buck1-over-curr-l", 500,
- "bd96801-buck1-overcurr-l"),
+ "buck1-overcurr-l"),
BD96801_IRQINFO(BD96801_PROT_OCP, "buck1-over-curr-n", 500,
- "bd96801-buck1-overcurr-n"),
+ "buck1-overcurr-n"),
BD96801_IRQINFO(BD96801_PROT_OVP, "buck1-over-voltage", 500,
- "bd96801-buck1-overvolt"),
+ "buck1-overvolt"),
BD96801_IRQINFO(BD96801_PROT_UVP, "buck1-under-voltage", 500,
- "bd96801-buck1-undervolt"),
+ "buck1-undervolt"),
BD96801_IRQINFO(BD96801_PROT_TEMP, "buck1-over-temp", 500,
- "bd96801-buck1-thermal")
+ "buck1-thermal")
};
static const struct bd96801_irqinfo buck2_irqinfo[] = {
BD96801_IRQINFO(BD96801_PROT_OCP, "buck2-over-curr-h", 500,
- "bd96801-buck2-overcurr-h"),
+ "buck2-overcurr-h"),
BD96801_IRQINFO(BD96801_PROT_OCP, "buck2-over-curr-l", 500,
- "bd96801-buck2-overcurr-l"),
+ "buck2-overcurr-l"),
BD96801_IRQINFO(BD96801_PROT_OCP, "buck2-over-curr-n", 500,
- "bd96801-buck2-overcurr-n"),
+ "buck2-overcurr-n"),
BD96801_IRQINFO(BD96801_PROT_OVP, "buck2-over-voltage", 500,
- "bd96801-buck2-overvolt"),
+ "buck2-overvolt"),
BD96801_IRQINFO(BD96801_PROT_UVP, "buck2-under-voltage", 500,
- "bd96801-buck2-undervolt"),
+ "buck2-undervolt"),
BD96801_IRQINFO(BD96801_PROT_TEMP, "buck2-over-temp", 500,
- "bd96801-buck2-thermal")
+ "buck2-thermal")
};
static const struct bd96801_irqinfo buck3_irqinfo[] = {
BD96801_IRQINFO(BD96801_PROT_OCP, "buck3-over-curr-h", 500,
- "bd96801-buck3-overcurr-h"),
+ "buck3-overcurr-h"),
BD96801_IRQINFO(BD96801_PROT_OCP, "buck3-over-curr-l", 500,
- "bd96801-buck3-overcurr-l"),
+ "buck3-overcurr-l"),
BD96801_IRQINFO(BD96801_PROT_OCP, "buck3-over-curr-n", 500,
- "bd96801-buck3-overcurr-n"),
+ "buck3-overcurr-n"),
BD96801_IRQINFO(BD96801_PROT_OVP, "buck3-over-voltage", 500,
- "bd96801-buck3-overvolt"),
+ "buck3-overvolt"),
BD96801_IRQINFO(BD96801_PROT_UVP, "buck3-under-voltage", 500,
- "bd96801-buck3-undervolt"),
+ "buck3-undervolt"),
BD96801_IRQINFO(BD96801_PROT_TEMP, "buck3-over-temp", 500,
- "bd96801-buck3-thermal")
+ "buck3-thermal")
};
static const struct bd96801_irqinfo buck4_irqinfo[] = {
BD96801_IRQINFO(BD96801_PROT_OCP, "buck4-over-curr-h", 500,
- "bd96801-buck4-overcurr-h"),
+ "buck4-overcurr-h"),
BD96801_IRQINFO(BD96801_PROT_OCP, "buck4-over-curr-l", 500,
- "bd96801-buck4-overcurr-l"),
+ "buck4-overcurr-l"),
BD96801_IRQINFO(BD96801_PROT_OCP, "buck4-over-curr-n", 500,
- "bd96801-buck4-overcurr-n"),
+ "buck4-overcurr-n"),
BD96801_IRQINFO(BD96801_PROT_OVP, "buck4-over-voltage", 500,
- "bd96801-buck4-overvolt"),
+ "buck4-overvolt"),
BD96801_IRQINFO(BD96801_PROT_UVP, "buck4-under-voltage", 500,
- "bd96801-buck4-undervolt"),
+ "buck4-undervolt"),
BD96801_IRQINFO(BD96801_PROT_TEMP, "buck4-over-temp", 500,
- "bd96801-buck4-thermal")
+ "buck4-thermal")
};
static const struct bd96801_irqinfo ldo5_irqinfo[] = {
BD96801_IRQINFO(BD96801_PROT_OCP, "ldo5-overcurr", 500,
- "bd96801-ldo5-overcurr"),
+ "ldo5-overcurr"),
BD96801_IRQINFO(BD96801_PROT_OVP, "ldo5-over-voltage", 500,
- "bd96801-ldo5-overvolt"),
+ "ldo5-overvolt"),
BD96801_IRQINFO(BD96801_PROT_UVP, "ldo5-under-voltage", 500,
- "bd96801-ldo5-undervolt"),
+ "ldo5-undervolt"),
};
static const struct bd96801_irqinfo ldo6_irqinfo[] = {
BD96801_IRQINFO(BD96801_PROT_OCP, "ldo6-overcurr", 500,
- "bd96801-ldo6-overcurr"),
+ "ldo6-overcurr"),
BD96801_IRQINFO(BD96801_PROT_OVP, "ldo6-over-voltage", 500,
- "bd96801-ldo6-overvolt"),
+ "ldo6-overvolt"),
BD96801_IRQINFO(BD96801_PROT_UVP, "ldo6-under-voltage", 500,
- "bd96801-ldo6-undervolt"),
+ "ldo6-undervolt"),
};
static const struct bd96801_irqinfo ldo7_irqinfo[] = {
BD96801_IRQINFO(BD96801_PROT_OCP, "ldo7-overcurr", 500,
- "bd96801-ldo7-overcurr"),
+ "ldo7-overcurr"),
BD96801_IRQINFO(BD96801_PROT_OVP, "ldo7-over-voltage", 500,
- "bd96801-ldo7-overvolt"),
+ "ldo7-overvolt"),
BD96801_IRQINFO(BD96801_PROT_UVP, "ldo7-under-voltage", 500,
- "bd96801-ldo7-undervolt"),
+ "ldo7-undervolt"),
};
struct bd96801_irq_desc {
@@ -302,6 +328,7 @@ struct bd96801_pmic_data {
struct bd96801_regulator_data regulator_data[BD96801_NUM_REGULATORS];
struct regmap *regmap;
int fatal_ind;
+ int num_regulators;
};
static int ldo_map_notif(int irq, struct regulator_irq_data *rid,
@@ -503,6 +530,70 @@ static int bd96801_walk_regulator_dt(struct device *dev, struct regmap *regmap,
 * case later. What we can easily do to prepare is to not use static global
* data for regulators though.
*/
+static const struct bd96801_pmic_data bd96802_data = {
+ .regulator_data = {
+ {
+ .desc = {
+ .name = "buck1",
+ .of_match = of_match_ptr("buck1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_BUCK1,
+ .ops = &bd96801_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96802_tune_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96802_tune_volts),
+ .n_voltages = BD96801_BUCK_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_BUCK1_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_BUCK1_VSEL_REG,
+ .vsel_mask = BD96801_BUCK_VSEL_MASK,
+ .ramp_reg = BD96801_BUCK1_VSEL_REG,
+ .ramp_mask = BD96801_MASK_RAMP_DELAY,
+ .ramp_delay_table = &buck_ramp_table[0],
+ .n_ramp_values = ARRAY_SIZE(buck_ramp_table),
+ .owner = THIS_MODULE,
+ },
+ .init_ranges = bd96802_buck_init_volts,
+ .num_ranges = ARRAY_SIZE(bd96802_buck_init_volts),
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&buck1_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(buck1_irqinfo),
+ },
+ },
+ {
+ .desc = {
+ .name = "buck2",
+ .of_match = of_match_ptr("buck2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_BUCK2,
+ .ops = &bd96801_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96802_tune_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96802_tune_volts),
+ .n_voltages = BD96801_BUCK_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_BUCK2_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_BUCK2_VSEL_REG,
+ .vsel_mask = BD96801_BUCK_VSEL_MASK,
+ .ramp_reg = BD96801_BUCK2_VSEL_REG,
+ .ramp_mask = BD96801_MASK_RAMP_DELAY,
+ .ramp_delay_table = &buck_ramp_table[0],
+ .n_ramp_values = ARRAY_SIZE(buck_ramp_table),
+ .owner = THIS_MODULE,
+ },
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&buck2_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(buck2_irqinfo),
+ },
+ .init_ranges = bd96802_buck_init_volts,
+ .num_ranges = ARRAY_SIZE(bd96802_buck_init_volts),
+ },
+ },
+ .num_regulators = 2,
+};
+
static const struct bd96801_pmic_data bd96801_data = {
.regulator_data = {
{
@@ -688,11 +779,265 @@ static const struct bd96801_pmic_data bd96801_data = {
.ldo_vol_lvl = BD96801_LDO7_VOL_LVL_REG,
},
},
+ .num_regulators = 7,
};
-static int initialize_pmic_data(struct device *dev,
+static const struct bd96801_pmic_data bd96805_data = {
+ .regulator_data = {
+ {
+ .desc = {
+ .name = "buck1",
+ .of_match = of_match_ptr("buck1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_BUCK1,
+ .ops = &bd96801_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96805_tune_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96805_tune_volts),
+ .n_voltages = BD96805_BUCK_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_BUCK1_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_BUCK1_VSEL_REG,
+ .vsel_mask = BD96805_BUCK_VSEL_MASK,
+ .ramp_reg = BD96801_BUCK1_VSEL_REG,
+ .ramp_mask = BD96801_MASK_RAMP_DELAY,
+ .ramp_delay_table = &buck_ramp_table[0],
+ .n_ramp_values = ARRAY_SIZE(buck_ramp_table),
+ .owner = THIS_MODULE,
+ },
+ .init_ranges = bd96805_buck_init_volts,
+ .num_ranges = ARRAY_SIZE(bd96805_buck_init_volts),
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&buck1_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(buck1_irqinfo),
+ },
+ }, {
+ .desc = {
+ .name = "buck2",
+ .of_match = of_match_ptr("buck2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_BUCK2,
+ .ops = &bd96801_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96805_tune_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96805_tune_volts),
+ .n_voltages = BD96805_BUCK_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_BUCK2_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_BUCK2_VSEL_REG,
+ .vsel_mask = BD96805_BUCK_VSEL_MASK,
+ .ramp_reg = BD96801_BUCK2_VSEL_REG,
+ .ramp_mask = BD96801_MASK_RAMP_DELAY,
+ .ramp_delay_table = &buck_ramp_table[0],
+ .n_ramp_values = ARRAY_SIZE(buck_ramp_table),
+ .owner = THIS_MODULE,
+ },
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&buck2_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(buck2_irqinfo),
+ },
+ .init_ranges = bd96805_buck_init_volts,
+ .num_ranges = ARRAY_SIZE(bd96805_buck_init_volts),
+ }, {
+ .desc = {
+ .name = "buck3",
+ .of_match = of_match_ptr("buck3"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_BUCK3,
+ .ops = &bd96801_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96805_tune_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96805_tune_volts),
+ .n_voltages = BD96805_BUCK_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_BUCK3_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_BUCK3_VSEL_REG,
+ .vsel_mask = BD96805_BUCK_VSEL_MASK,
+ .ramp_reg = BD96801_BUCK3_VSEL_REG,
+ .ramp_mask = BD96801_MASK_RAMP_DELAY,
+ .ramp_delay_table = &buck_ramp_table[0],
+ .n_ramp_values = ARRAY_SIZE(buck_ramp_table),
+ .owner = THIS_MODULE,
+ },
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&buck3_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(buck3_irqinfo),
+ },
+ .init_ranges = bd96805_buck_init_volts,
+ .num_ranges = ARRAY_SIZE(bd96805_buck_init_volts),
+ }, {
+ .desc = {
+ .name = "buck4",
+ .of_match = of_match_ptr("buck4"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_BUCK4,
+ .ops = &bd96801_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96805_tune_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96805_tune_volts),
+ .n_voltages = BD96805_BUCK_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_BUCK4_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_BUCK4_VSEL_REG,
+ .vsel_mask = BD96805_BUCK_VSEL_MASK,
+ .ramp_reg = BD96801_BUCK4_VSEL_REG,
+ .ramp_mask = BD96801_MASK_RAMP_DELAY,
+ .ramp_delay_table = &buck_ramp_table[0],
+ .n_ramp_values = ARRAY_SIZE(buck_ramp_table),
+ .owner = THIS_MODULE,
+ },
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&buck4_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(buck4_irqinfo),
+ },
+ .init_ranges = bd96805_buck_init_volts,
+ .num_ranges = ARRAY_SIZE(bd96805_buck_init_volts),
+ }, {
+ .desc = {
+ .name = "ldo5",
+ .of_match = of_match_ptr("ldo5"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_LDO5,
+ .ops = &bd96801_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96801_ldo_int_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96801_ldo_int_volts),
+ .n_voltages = BD96801_LDO_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_LDO5_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_LDO5_VSEL_REG,
+ .vsel_mask = BD96801_LDO_VSEL_MASK,
+ .owner = THIS_MODULE,
+ },
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&ldo5_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(ldo5_irqinfo),
+ },
+ .ldo_vol_lvl = BD96801_LDO5_VOL_LVL_REG,
+ }, {
+ .desc = {
+ .name = "ldo6",
+ .of_match = of_match_ptr("ldo6"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_LDO6,
+ .ops = &bd96801_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96801_ldo_int_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96801_ldo_int_volts),
+ .n_voltages = BD96801_LDO_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_LDO6_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_LDO6_VSEL_REG,
+ .vsel_mask = BD96801_LDO_VSEL_MASK,
+ .owner = THIS_MODULE,
+ },
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&ldo6_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(ldo6_irqinfo),
+ },
+ .ldo_vol_lvl = BD96801_LDO6_VOL_LVL_REG,
+ }, {
+ .desc = {
+ .name = "ldo7",
+ .of_match = of_match_ptr("ldo7"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_LDO7,
+ .ops = &bd96801_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96801_ldo_int_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96801_ldo_int_volts),
+ .n_voltages = BD96801_LDO_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_LDO7_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_LDO7_VSEL_REG,
+ .vsel_mask = BD96801_LDO_VSEL_MASK,
+ .owner = THIS_MODULE,
+ },
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&ldo7_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(ldo7_irqinfo),
+ },
+ .ldo_vol_lvl = BD96801_LDO7_VOL_LVL_REG,
+ },
+ },
+ .num_regulators = 7,
+};
+
+static const struct bd96801_pmic_data bd96806_data = {
+ .regulator_data = {
+ {
+ .desc = {
+ .name = "buck1",
+ .of_match = of_match_ptr("buck1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_BUCK1,
+ .ops = &bd96801_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96806_tune_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96806_tune_volts),
+ .n_voltages = BD96805_BUCK_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_BUCK1_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_BUCK1_VSEL_REG,
+ .vsel_mask = BD96805_BUCK_VSEL_MASK,
+ .ramp_reg = BD96801_BUCK1_VSEL_REG,
+ .ramp_mask = BD96801_MASK_RAMP_DELAY,
+ .ramp_delay_table = &buck_ramp_table[0],
+ .n_ramp_values = ARRAY_SIZE(buck_ramp_table),
+ .owner = THIS_MODULE,
+ },
+ .init_ranges = bd96806_buck_init_volts,
+ .num_ranges = ARRAY_SIZE(bd96806_buck_init_volts),
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&buck1_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(buck1_irqinfo),
+ },
+ },
+ {
+ .desc = {
+ .name = "buck2",
+ .of_match = of_match_ptr("buck2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_BUCK2,
+ .ops = &bd96801_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96806_tune_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96806_tune_volts),
+ .n_voltages = BD96805_BUCK_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_BUCK2_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_BUCK2_VSEL_REG,
+ .vsel_mask = BD96805_BUCK_VSEL_MASK,
+ .ramp_reg = BD96801_BUCK2_VSEL_REG,
+ .ramp_mask = BD96801_MASK_RAMP_DELAY,
+ .ramp_delay_table = &buck_ramp_table[0],
+ .n_ramp_values = ARRAY_SIZE(buck_ramp_table),
+ .owner = THIS_MODULE,
+ },
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&buck2_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(buck2_irqinfo),
+ },
+ .init_ranges = bd96806_buck_init_volts,
+ .num_ranges = ARRAY_SIZE(bd96806_buck_init_volts),
+ },
+ },
+ .num_regulators = 2,
+};
+
+static int initialize_pmic_data(struct platform_device *pdev,
struct bd96801_pmic_data *pdata)
{
+ struct device *dev = &pdev->dev;
int r, i;
/*
@@ -700,7 +1045,7 @@ static int initialize_pmic_data(struct device *dev,
* wish to modify IRQ information independently for each driver
* instance.
*/
- for (r = 0; r < BD96801_NUM_REGULATORS; r++) {
+ for (r = 0; r < pdata->num_regulators; r++) {
const struct bd96801_irqinfo *template;
struct bd96801_irqinfo *new;
int num_infos;
@@ -741,8 +1086,7 @@ static int bd96801_rdev_errb_irqs(struct platform_device *pdev,
int i;
void *retp;
static const char * const single_out_errb_irqs[] = {
- "bd96801-%s-pvin-err", "bd96801-%s-ovp-err",
- "bd96801-%s-uvp-err", "bd96801-%s-shdn-err",
+ "%s-pvin-err", "%s-ovp-err", "%s-uvp-err", "%s-shdn-err",
};
for (i = 0; i < ARRAY_SIZE(single_out_errb_irqs); i++) {
@@ -779,12 +1123,10 @@ static int bd96801_global_errb_irqs(struct platform_device *pdev,
int i, num_irqs;
void *retp;
static const char * const global_errb_irqs[] = {
- "bd96801-otp-err", "bd96801-dbist-err", "bd96801-eep-err",
- "bd96801-abist-err", "bd96801-prstb-err", "bd96801-drmoserr1",
- "bd96801-drmoserr2", "bd96801-slave-err", "bd96801-vref-err",
- "bd96801-tsd", "bd96801-uvlo-err", "bd96801-ovlo-err",
- "bd96801-osc-err", "bd96801-pon-err", "bd96801-poff-err",
- "bd96801-cmd-shdn-err", "bd96801-int-shdn-err"
+ "otp-err", "dbist-err", "eep-err", "abist-err", "prstb-err",
+ "drmoserr1", "drmoserr2", "slave-err", "vref-err", "tsd",
+ "uvlo-err", "ovlo-err", "osc-err", "pon-err", "poff-err",
+ "cmd-shdn-err", "int-shdn-err"
};
num_irqs = ARRAY_SIZE(global_errb_irqs);
@@ -869,6 +1211,7 @@ static int bd96801_probe(struct platform_device *pdev)
{
struct regulator_dev *ldo_errs_rdev_arr[BD96801_NUM_LDOS];
struct regulator_dev *all_rdevs[BD96801_NUM_REGULATORS];
+ struct bd96801_pmic_data *pdata_template;
struct bd96801_regulator_data *rdesc;
struct regulator_config config = {};
int ldo_errs_arr[BD96801_NUM_LDOS];
@@ -881,12 +1224,16 @@ static int bd96801_probe(struct platform_device *pdev)
parent = pdev->dev.parent;
- pdata = devm_kmemdup(&pdev->dev, &bd96801_data, sizeof(bd96801_data),
+ pdata_template = (struct bd96801_pmic_data *)platform_get_device_id(pdev)->driver_data;
+ if (!pdata_template)
+ return -ENODEV;
+
+ pdata = devm_kmemdup(&pdev->dev, pdata_template, sizeof(bd96801_data),
GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- if (initialize_pmic_data(&pdev->dev, pdata))
+ if (initialize_pmic_data(pdev, pdata))
return -ENOMEM;
pdata->regmap = dev_get_regmap(parent, NULL);
@@ -909,11 +1256,11 @@ static int bd96801_probe(struct platform_device *pdev)
use_errb = true;
ret = bd96801_walk_regulator_dt(&pdev->dev, pdata->regmap, rdesc,
- BD96801_NUM_REGULATORS);
+ pdata->num_regulators);
if (ret)
return ret;
- for (i = 0; i < ARRAY_SIZE(pdata->regulator_data); i++) {
+ for (i = 0; i < pdata->num_regulators; i++) {
struct regulator_dev *rdev;
struct bd96801_irq_desc *idesc = &rdesc[i].irq_desc;
int j;
@@ -926,6 +1273,7 @@ static int bd96801_probe(struct platform_device *pdev)
rdesc[i].desc.name);
return PTR_ERR(rdev);
}
+
all_rdevs[i] = rdev;
/*
* LDOs don't have own temperature monitoring. If temperature
@@ -956,12 +1304,12 @@ static int bd96801_probe(struct platform_device *pdev)
if (temp_notif_ldos) {
int irq;
struct regulator_irq_desc tw_desc = {
- .name = "bd96801-core-thermal",
+ .name = "core-thermal",
.irq_off_ms = 500,
.map_event = ldo_map_notif,
};
- irq = platform_get_irq_byname(pdev, "bd96801-core-thermal");
+ irq = platform_get_irq_byname(pdev, "core-thermal");
if (irq < 0)
return irq;
@@ -975,14 +1323,17 @@ static int bd96801_probe(struct platform_device *pdev)
if (use_errb)
return bd96801_global_errb_irqs(pdev, all_rdevs,
- ARRAY_SIZE(all_rdevs));
+ pdata->num_regulators);
return 0;
}
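+/*
+ * Each id entry carries its variant's bd96801_pmic_data in driver_data;
+ * probe fetches it back via platform_get_device_id().
+ */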
static const struct platform_device_id bd96801_pmic_id[] = {
- { "bd96801-regulator", },
- { }
+ { "bd96801-regulator", (kernel_ulong_t)&bd96801_data },
+ { "bd96802-regulator", (kernel_ulong_t)&bd96802_data },
+ { "bd96805-regulator", (kernel_ulong_t)&bd96805_data },
+ { "bd96806-regulator", (kernel_ulong_t)&bd96806_data },
+ { },
};
MODULE_DEVICE_TABLE(platform, bd96801_pmic_id);
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index 5ff4e2fee4ab..1c7598b8475d 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -36,7 +36,7 @@ obj-$(CONFIG_RCAR_REMOTEPROC) += rcar_rproc.o
obj-$(CONFIG_ST_REMOTEPROC) += st_remoteproc.o
obj-$(CONFIG_ST_SLIM_REMOTEPROC) += st_slim_rproc.o
obj-$(CONFIG_STM32_RPROC) += stm32_rproc.o
-obj-$(CONFIG_TI_K3_DSP_REMOTEPROC) += ti_k3_dsp_remoteproc.o
-obj-$(CONFIG_TI_K3_M4_REMOTEPROC) += ti_k3_m4_remoteproc.o
-obj-$(CONFIG_TI_K3_R5_REMOTEPROC) += ti_k3_r5_remoteproc.o
+obj-$(CONFIG_TI_K3_DSP_REMOTEPROC) += ti_k3_dsp_remoteproc.o ti_k3_common.o
+obj-$(CONFIG_TI_K3_M4_REMOTEPROC) += ti_k3_m4_remoteproc.o ti_k3_common.o
+obj-$(CONFIG_TI_K3_R5_REMOTEPROC) += ti_k3_r5_remoteproc.o ti_k3_common.o
obj-$(CONFIG_XLNX_R5_REMOTEPROC) += xlnx_r5_remoteproc.o
diff --git a/drivers/remoteproc/imx_dsp_rproc.c b/drivers/remoteproc/imx_dsp_rproc.c
index 90cb1fc13e71..5ee622bf5352 100644
--- a/drivers/remoteproc/imx_dsp_rproc.c
+++ b/drivers/remoteproc/imx_dsp_rproc.c
@@ -36,9 +36,18 @@ module_param_named(no_mailboxes, no_mailboxes, int, 0644);
MODULE_PARM_DESC(no_mailboxes,
"There is no mailbox between cores, so ignore remote proc reply after start, default is 0 (off).");
+/* Flag indicating that the remote is up and running */
#define REMOTE_IS_READY BIT(0)
+/* Flag indicating that the host should wait for a firmware-ready response */
+#define WAIT_FW_READY BIT(1)
#define REMOTE_READY_WAIT_MAX_RETRIES 500
+/*
+ * This flag is set in the DSP resource table's features field to indicate
+ * that the firmware requires the host NOT to wait for a FW_READY response.
+ */
+#define FEATURE_DONT_WAIT_FW_READY BIT(0)
+
/* att flags */
/* DSP own area */
#define ATT_OWN BIT(31)
@@ -73,6 +82,10 @@ MODULE_PARM_DESC(no_mailboxes,
#define IMX8ULP_SIP_HIFI_XRDC 0xc200000e
+#define FW_RSC_NXP_S_MAGIC ((uint32_t)'n' << 24 | \
+ (uint32_t)'x' << 16 | \
+ (uint32_t)'p' << 8 | \
+ (uint32_t)'s')
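+/* The shifts above pack the ASCII bytes 'n','x','p','s' into 0x6e787073 */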
/*
* enum - Predefined Mailbox Messages
*
@@ -139,6 +152,24 @@ struct imx_dsp_rproc_dcfg {
int (*reset)(struct imx_dsp_rproc *priv);
};
+/**
+ * struct fw_rsc_imx_dsp - i.MX DSP specific info
+ *
+ * @len: length of the resource entry
+ * @magic_num: 32-bit magic number
+ * @version: version of data structure
+ * @features: feature flags supported by the i.MX DSP firmware
+ *
+ * This represents a DSP-specific resource in the firmware's
+ * resource table, providing information on supported features.
+ */
+struct fw_rsc_imx_dsp {
+ uint32_t len;
+ uint32_t magic_num;
+ uint32_t version;
+ uint32_t features;
+} __packed;
+
static const struct imx_rproc_att imx_dsp_rproc_att_imx8qm[] = {
/* dev addr , sys addr , size , flags */
{ 0x596e8000, 0x556e8000, 0x00008000, ATT_OWN },
@@ -297,6 +328,66 @@ static int imx_dsp_rproc_ready(struct rproc *rproc)
return -ETIMEDOUT;
}
+/**
+ * imx_dsp_rproc_handle_rsc() - Handle DSP-specific resource table entries
+ * @rproc: remote processor instance
+ * @rsc_type: resource type identifier
+ * @rsc: pointer to the resource entry
+ * @offset: offset of the resource entry
+ * @avail: available space in the resource table
+ *
+ * Parse the DSP-specific resource entry and update flags accordingly.
+ * If the firmware advertises FEATURE_DONT_WAIT_FW_READY, the WAIT_FW_READY
+ * flag is cleared and the host will not wait for the firmware to signal
+ * readiness before proceeding with execution.
+ *
+ * Return: RSC_HANDLED if processed successfully, RSC_IGNORED otherwise.
+ */
+static int imx_dsp_rproc_handle_rsc(struct rproc *rproc, u32 rsc_type,
+ void *rsc, int offset, int avail)
+{
+ struct imx_dsp_rproc *priv = rproc->priv;
+ struct fw_rsc_imx_dsp *imx_dsp_rsc = rsc;
+ struct device *dev = rproc->dev.parent;
+
+ if (!imx_dsp_rsc) {
+ dev_dbg(dev, "Invalid fw_rsc_imx_dsp.\n");
+ return RSC_IGNORED;
+ }
+
+ /* Make sure resource isn't truncated */
+ if (sizeof(struct fw_rsc_imx_dsp) > avail ||
+ sizeof(struct fw_rsc_imx_dsp) != imx_dsp_rsc->len) {
+ dev_dbg(dev, "Resource fw_rsc_imx_dsp is truncated.\n");
+ return RSC_IGNORED;
+ }
+
+ /*
+	 * If the FW_RSC_NXP_S_MAGIC number is not found, then
+	 * wait for the fw_ready reply (the default workflow).
+ */
+ if (imx_dsp_rsc->magic_num != FW_RSC_NXP_S_MAGIC) {
+ dev_dbg(dev, "Invalid resource table magic number.\n");
+ return RSC_IGNORED;
+ }
+
+ /*
+ * For now, in struct fw_rsc_imx_dsp, version 0,
+ * only FEATURE_DONT_WAIT_FW_READY is valid.
+ *
+	 * When adding new features, please bump the version.
+ */
+ if (imx_dsp_rsc->version > 0) {
+ dev_warn(dev, "Unexpected fw_rsc_imx_dsp version %d.\n",
+ imx_dsp_rsc->version);
+ return RSC_IGNORED;
+ }
+
+ if (imx_dsp_rsc->features & FEATURE_DONT_WAIT_FW_READY)
+ priv->flags &= ~WAIT_FW_READY;
+
+ return RSC_HANDLED;
+}
+
/*
* Start function for rproc_ops
*
@@ -335,8 +426,8 @@ static int imx_dsp_rproc_start(struct rproc *rproc)
if (ret)
dev_err(dev, "Failed to enable remote core!\n");
- else
- ret = imx_dsp_rproc_ready(rproc);
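+	/* Block for the FW_READY handshake only when the firmware expects it */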
+ else if (priv->flags & WAIT_FW_READY)
+ return imx_dsp_rproc_ready(rproc);
return ret;
}
@@ -939,6 +1030,7 @@ static const struct rproc_ops imx_dsp_rproc_ops = {
.kick = imx_dsp_rproc_kick,
.load = imx_dsp_rproc_elf_load_segments,
.parse_fw = imx_dsp_rproc_parse_fw,
+ .handle_rsc = imx_dsp_rproc_handle_rsc,
.find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
.sanity_check = rproc_elf_sanity_check,
.get_boot_addr = rproc_elf_get_boot_addr,
@@ -1058,6 +1150,8 @@ static int imx_dsp_rproc_probe(struct platform_device *pdev)
priv = rproc->priv;
priv->rproc = rproc;
priv->dsp_dcfg = dsp_dcfg;
+ /* By default, host waits for fw_ready reply */
+ priv->flags |= WAIT_FW_READY;
if (no_mailboxes)
imx_dsp_rproc_mbox_init = imx_dsp_rproc_mbox_no_alloc;
diff --git a/drivers/remoteproc/qcom_wcnss_iris.c b/drivers/remoteproc/qcom_wcnss_iris.c
index b989718776bd..2b52b403eb3f 100644
--- a/drivers/remoteproc/qcom_wcnss_iris.c
+++ b/drivers/remoteproc/qcom_wcnss_iris.c
@@ -196,6 +196,7 @@ struct qcom_iris *qcom_iris_probe(struct device *parent, bool *use_48mhz_xo)
err_device_del:
device_del(&iris->dev);
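+	/* device_del() does not drop the initial reference; put_device() does */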
+ put_device(&iris->dev);
return ERR_PTR(ret);
}
@@ -203,4 +204,5 @@ err_device_del:
void qcom_iris_remove(struct qcom_iris *iris)
{
device_del(&iris->dev);
+ put_device(&iris->dev);
}
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index b21eedefff87..81b2ccf988e8 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -1617,7 +1617,7 @@ static int rproc_attach(struct rproc *rproc)
ret = rproc_set_rsc_table(rproc);
if (ret) {
dev_err(dev, "can't load resource table: %d\n", ret);
- goto unprepare_device;
+ goto clean_up_resources;
}
/* reset max_notifyid */
@@ -1634,7 +1634,7 @@ static int rproc_attach(struct rproc *rproc)
ret = rproc_handle_resources(rproc, rproc_loading_handlers);
if (ret) {
dev_err(dev, "Failed to process resources: %d\n", ret);
- goto unprepare_device;
+ goto clean_up_resources;
}
/* Allocate carveout resources associated to rproc */
@@ -1653,9 +1653,9 @@ static int rproc_attach(struct rproc *rproc)
clean_up_resources:
rproc_resource_cleanup(rproc);
-unprepare_device:
/* release HW resources if needed */
rproc_unprepare_device(rproc);
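+	/* free the clean resource table copy set up by rproc_set_rsc_table() */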
+ kfree(rproc->clean_table);
disable_iommu:
rproc_disable_iommu(rproc);
return ret;
@@ -2025,7 +2025,6 @@ int rproc_shutdown(struct rproc *rproc)
kfree(rproc->cached_table);
rproc->cached_table = NULL;
rproc->table_ptr = NULL;
- rproc->table_sz = 0;
out:
mutex_unlock(&rproc->lock);
return ret;
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
index b02b36a3f515..431648607d53 100644
--- a/drivers/remoteproc/stm32_rproc.c
+++ b/drivers/remoteproc/stm32_rproc.c
@@ -835,6 +835,7 @@ static int stm32_rproc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct stm32_rproc *ddata;
struct device_node *np = dev->of_node;
+	const char *fw_name = NULL;
struct rproc *rproc;
unsigned int state;
int ret;
@@ -843,7 +844,12 @@ static int stm32_rproc_probe(struct platform_device *pdev)
if (ret)
return ret;
- rproc = devm_rproc_alloc(dev, np->name, &st_rproc_ops, NULL, sizeof(*ddata));
+	/* Look for an optional firmware name; -EINVAL means none was given */
+ ret = rproc_of_parse_firmware(dev, 0, &fw_name);
+ if (ret < 0 && ret != -EINVAL)
+ return ret;
+
+ rproc = devm_rproc_alloc(dev, np->name, &st_rproc_ops, fw_name, sizeof(*ddata));
if (!rproc)
return -ENOMEM;
diff --git a/drivers/remoteproc/ti_k3_common.c b/drivers/remoteproc/ti_k3_common.c
new file mode 100644
index 000000000000..d5dccc81d460
--- /dev/null
+++ b/drivers/remoteproc/ti_k3_common.c
@@ -0,0 +1,551 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI K3 Remote Processor(s) driver common code
+ *
+ * Refactored out of ti_k3_r5_remoteproc.c, ti_k3_dsp_remoteproc.c and
+ * ti_k3_m4_remoteproc.c.
+ *
+ * ti_k3_r5_remoteproc.c:
+ * Copyright (C) 2017-2022 Texas Instruments Incorporated - https://www.ti.com/
+ * Suman Anna <s-anna@ti.com>
+ *
+ * ti_k3_dsp_remoteproc.c:
+ * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
+ * Suman Anna <s-anna@ti.com>
+ *
+ * ti_k3_m4_remoteproc.c:
+ * Copyright (C) 2021-2024 Texas Instruments Incorporated - https://www.ti.com/
+ * Hari Nagalla <hnagalla@ti.com>
+ */
+
+#include <linux/io.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/omap-mailbox.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include "omap_remoteproc.h"
+#include "remoteproc_internal.h"
+#include "ti_sci_proc.h"
+#include "ti_k3_common.h"
+
+/**
+ * k3_rproc_mbox_callback() - inbound mailbox message handler
+ * @client: mailbox client pointer used for requesting the mailbox channel
+ * @data: mailbox payload
+ *
+ * This handler is invoked by the K3 mailbox driver whenever a mailbox
+ * message is received. Usually, the mailbox payload simply contains
+ * the index of the virtqueue that is kicked by the remote processor,
+ * and we let remoteproc core handle it.
+ *
+ * In addition to virtqueue indices, we also have some out-of-band values
+ * that indicate different events. Those values are deliberately very
+ * large so they don't coincide with virtqueue indices.
+ */
+void k3_rproc_mbox_callback(struct mbox_client *client, void *data)
+{
+ struct k3_rproc *kproc = container_of(client, struct k3_rproc, client);
+ struct device *dev = kproc->rproc->dev.parent;
+ struct rproc *rproc = kproc->rproc;
+ u32 msg = (u32)(uintptr_t)(data);
+
+ dev_dbg(dev, "mbox msg: 0x%x\n", msg);
+
+ switch (msg) {
+ case RP_MBOX_CRASH:
+ /*
+ * remoteproc detected an exception, but error recovery is not
+ * supported. So, just log this for now
+ */
+ dev_err(dev, "K3 rproc %s crashed\n", rproc->name);
+ break;
+ case RP_MBOX_ECHO_REPLY:
+ dev_info(dev, "received echo reply from %s\n", rproc->name);
+ break;
+ default:
+ /* silently handle all other valid messages */
+ if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
+ return;
+ if (msg > rproc->max_notifyid) {
+			dev_dbg(dev, "dropping unknown message 0x%x\n", msg);
+ return;
+ }
+ /* msg contains the index of the triggered vring */
+ if (rproc_vq_interrupt(rproc, msg) == IRQ_NONE)
+ dev_dbg(dev, "no message was found in vqid %d\n", msg);
+ }
+}
+EXPORT_SYMBOL_GPL(k3_rproc_mbox_callback);
+
+/*
+ * Kick the remote processor to notify about pending unprocessed messages.
+ * The vqid argument is unused and inconsequential: since the kick is
+ * performed through a simulated GPIO (a bit in an IPC interrupt-triggering
+ * register), the remote processor is expected to process both its Tx and
+ * Rx virtqueues.
+ */
+void k3_rproc_kick(struct rproc *rproc, int vqid)
+{
+ struct k3_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ u32 msg = (u32)vqid;
+ int ret;
+
+ /*
+ * Send the index of the triggered virtqueue in the mailbox payload.
+ * NOTE: msg is cast to uintptr_t to prevent compiler warnings when
+ * void* is 64bit. It is safely cast back to u32 in the mailbox driver.
+ */
+ ret = mbox_send_message(kproc->mbox, (void *)(uintptr_t)msg);
+ if (ret < 0)
+ dev_err(dev, "failed to send mailbox message, status = %d\n",
+ ret);
+}
+EXPORT_SYMBOL_GPL(k3_rproc_kick);
+
+/* Put the remote processor into reset */
+int k3_rproc_reset(struct k3_rproc *kproc)
+{
+ struct device *dev = kproc->dev;
+ int ret;
+
+ if (kproc->data->uses_lreset) {
+ ret = reset_control_assert(kproc->reset);
+ if (ret)
+ dev_err(dev, "local-reset assert failed (%pe)\n", ERR_PTR(ret));
+ } else {
+ ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret)
+ dev_err(dev, "module-reset assert failed (%pe)\n", ERR_PTR(ret));
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_rproc_reset);
+
+/* Release the remote processor from reset */
+int k3_rproc_release(struct k3_rproc *kproc)
+{
+ struct device *dev = kproc->dev;
+ int ret;
+
+ if (kproc->data->uses_lreset) {
+ ret = reset_control_deassert(kproc->reset);
+ if (ret) {
+			dev_err(dev, "local-reset deassert failed (%pe)\n", ERR_PTR(ret));
+ if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id))
+ dev_warn(dev, "module-reset assert back failed\n");
+ }
+ } else {
+ ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret)
+ dev_err(dev, "module-reset deassert failed (%pe)\n", ERR_PTR(ret));
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_rproc_release);
+
+int k3_rproc_request_mbox(struct rproc *rproc)
+{
+ struct k3_rproc *kproc = rproc->priv;
+ struct mbox_client *client = &kproc->client;
+ struct device *dev = kproc->dev;
+ int ret;
+
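+ /*
+ * Configure a fire-and-forget mailbox client: sends do not block
+ * (tx_block is false) and no TX-done signaling is expected, so neither
+ * a tx_done callback nor TXDONE knowledge is set up.
+ */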
+ client->dev = dev;
+ client->tx_done = NULL;
+ client->rx_callback = k3_rproc_mbox_callback;
+ client->tx_block = false;
+ client->knows_txdone = false;
+
+ kproc->mbox = mbox_request_channel(client, 0);
+ if (IS_ERR(kproc->mbox))
+ return dev_err_probe(dev, PTR_ERR(kproc->mbox),
+ "mbox_request_channel failed\n");
+
+ /*
+ * Ping the remote processor. This is done only for sanity's sake for now;
+ * there is no functional effect whatsoever.
+ *
+ * Note that the reply will _not_ arrive immediately: this message
+ * will wait in the mailbox fifo until the remote processor is booted.
+ */
+ ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
+ if (ret < 0) {
+ dev_err(dev, "mbox_send_message failed (%pe)\n", ERR_PTR(ret));
+ mbox_free_channel(kproc->mbox);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(k3_rproc_request_mbox);
+
+/*
+ * The K3 DSP and M4 cores have a local reset that affects only the CPU, and a
+ * generic module reset that powers on the device and allows the internal
+ * memories to be accessed while the local reset is asserted. This function is
+ * used to release the global reset on remote cores to allow loading into the
+ * internal RAMs. The .prepare() ops is invoked by the remoteproc core before
+ * any firmware loading, and is followed by the .start() ops after loading to
+ * actually let the remote cores run.
+ */
+int k3_rproc_prepare(struct rproc *rproc)
+{
+ struct k3_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ /* If the core is already running, there is no need to deassert the module reset */
+ if (rproc->state == RPROC_DETACHED)
+ return 0;
+
+ /*
+ * Ensure the local reset is asserted so the core doesn't
+ * execute bogus code when the module reset is released.
+ */
+ if (kproc->data->uses_lreset) {
+ ret = k3_rproc_reset(kproc);
+ if (ret)
+ return ret;
+
+ ret = reset_control_status(kproc->reset);
+ if (ret <= 0) {
+ dev_err(dev, "local reset still not asserted\n");
+ return ret;
+ }
+ }
+
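+ /*
+ * Deassert the module (global) reset through the TI-SCI device ops so
+ * the internal RAMs become accessible for firmware loading.
+ */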
+ ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret) {
+ dev_err(dev, "could not deassert module-reset for internal RAM loading\n");
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(k3_rproc_prepare);
+
+/*
+ * This function implements the .unprepare() ops and performs the complementary
+ * operations to that of the .prepare() ops. The function is used to assert the
+ * global reset on applicable K3 DSP and M4 cores. This completes the second
+ * portion of powering down the remote core. The cores themselves are only
+ * halted in the .stop() callback through the local reset, and the .unprepare()
+ * ops is invoked by the remoteproc core after the remoteproc is stopped to
+ * balance the global reset.
+ */
+int k3_rproc_unprepare(struct rproc *rproc)
+{
+ struct k3_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ /* If the core is going to be detached, do not assert the module reset */
+ if (rproc->state == RPROC_ATTACHED)
+ return 0;
+
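+ /*
+ * Assert the module reset by releasing the TI-SCI device; this balances
+ * the get_device() call made in k3_rproc_prepare().
+ */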
+ ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret) {
+ dev_err(dev, "module-reset assert failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(k3_rproc_unprepare);
+
+/*
+ * Power up the remote processor.
+ *
+ * This function will be invoked only after the firmware for this rproc
+ * was loaded, parsed successfully, and all of its resource requirements
+ * were met. This callback is invoked only in remoteproc mode.
+ */
+int k3_rproc_start(struct rproc *rproc)
+{
+ struct k3_rproc *kproc = rproc->priv;
+
+ return k3_rproc_release(kproc);
+}
+EXPORT_SYMBOL_GPL(k3_rproc_start);
+
+/*
+ * Stop the remote processor.
+ *
+ * This function puts the remote processor into reset, and finishes processing
+ * of any pending messages. This callback is invoked only in remoteproc mode.
+ */
+int k3_rproc_stop(struct rproc *rproc)
+{
+ struct k3_rproc *kproc = rproc->priv;
+
+ return k3_rproc_reset(kproc);
+}
+EXPORT_SYMBOL_GPL(k3_rproc_stop);
+
+/*
+ * Attach to a running remote processor (IPC-only mode)
+ *
+ * The rproc attach callback is a NOP. The remote processor is already booted,
+ * and all required resources have been acquired during the probe routine, so
+ * there is no need to issue any TI-SCI commands to boot the remote cores. This
+ * callback is invoked only in IPC-only mode and exists because
+ * rproc_validate() checks for its existence.
+ */
+int k3_rproc_attach(struct rproc *rproc) { return 0; }
+EXPORT_SYMBOL_GPL(k3_rproc_attach);
+
+/*
+ * Detach from a running remote processor (IPC-only mode)
+ *
+ * The rproc detach callback is a NOP. The remote processor is not stopped and
+ * will be left in its booted state. This callback is invoked only in IPC-only
+ * mode and exists for sanity's sake.
+ */
+int k3_rproc_detach(struct rproc *rproc) { return 0; }
+EXPORT_SYMBOL_GPL(k3_rproc_detach);
+
+/*
+ * This function implements the .get_loaded_rsc_table() callback and is used
+ * to provide the resource table for a booted remote processor in IPC-only
+ * mode. The remote processor firmwares follow a design-by-contract approach
+ * and are expected to have the resource table at the base of the DDR region
+ * reserved for firmware usage. This provides flexibility for the remote
+ * processor to be booted by different bootloaders that may or may not have the
+ * ability to publish the resource table address and size through a DT
+ * property.
+ */
+struct resource_table *k3_get_loaded_rsc_table(struct rproc *rproc,
+ size_t *rsc_table_sz)
+{
+ struct k3_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+
+ if (!kproc->rmem[0].cpu_addr) {
+ dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /*
+ * NOTE: The resource table size is currently hard-coded to a maximum
+ * of 256 bytes. The most common resource table usage for K3 firmwares
+ * is to only have the vdev resource entry and an optional trace entry.
+ * The exact size could be computed based on resource table address, but
+ * the hard-coded value suffices to support the IPC-only mode.
+ */
+ *rsc_table_sz = 256;
+ return (__force struct resource_table *)kproc->rmem[0].cpu_addr;
+}
+EXPORT_SYMBOL_GPL(k3_get_loaded_rsc_table);
+
+/*
+ * Custom function to translate a remote processor device address (internal
+ * RAMs only) to a kernel virtual address. The remote processors can access
+ * their RAMs at either an internal address visible only from a remote
+ * processor, or at the SoC-level bus address. Both these addresses need to be
+ * looked through for translation. The translated addresses can be used either
+ * by the remoteproc core for loading (when using kernel remoteproc loader), or
+ * by any rpmsg bus drivers.
+ */
+void *k3_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
+{
+ struct k3_rproc *kproc = rproc->priv;
+ void __iomem *va = NULL;
+ phys_addr_t bus_addr;
+ u32 dev_addr, offset;
+ size_t size;
+ int i;
+
+ if (len == 0)
+ return NULL;
+
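+ /*
+ * Check the internal RAM regions first, matching both the rproc-local
+ * device address and the SoC-level bus address views of each region.
+ */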
+ for (i = 0; i < kproc->num_mems; i++) {
+ bus_addr = kproc->mem[i].bus_addr;
+ dev_addr = kproc->mem[i].dev_addr;
+ size = kproc->mem[i].size;
+
+ /* handle rproc-view addresses */
+ if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
+ offset = da - dev_addr;
+ va = kproc->mem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+
+ /* handle SoC-view addresses */
+ if (da >= bus_addr && (da + len) <= (bus_addr + size)) {
+ offset = da - bus_addr;
+ va = kproc->mem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+ }
+
+ /* handle static DDR reserved memory regions */
+ for (i = 0; i < kproc->num_rmems; i++) {
+ dev_addr = kproc->rmem[i].dev_addr;
+ size = kproc->rmem[i].size;
+
+ if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
+ offset = da - dev_addr;
+ va = kproc->rmem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(k3_rproc_da_to_va);
+
+int k3_rproc_of_get_memories(struct platform_device *pdev,
+ struct k3_rproc *kproc)
+{
+ const struct k3_rproc_dev_data *data = kproc->data;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int num_mems = 0;
+ int i;
+
+ num_mems = data->num_mems;
+ kproc->mem = devm_kcalloc(kproc->dev, num_mems,
+ sizeof(*kproc->mem), GFP_KERNEL);
+ if (!kproc->mem)
+ return -ENOMEM;
+
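+ /*
+ * Map every internal RAM region named in the device data; each region
+ * is ioremapped write-combined for use by the remoteproc loader.
+ */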
+ for (i = 0; i < num_mems; i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ data->mems[i].name);
+ if (!res) {
+ dev_err(dev, "found no memory resource for %s\n",
+ data->mems[i].name);
+ return -EINVAL;
+ }
+ if (!devm_request_mem_region(dev, res->start,
+ resource_size(res),
+ dev_name(dev))) {
+ dev_err(dev, "could not request %s region for resource\n",
+ data->mems[i].name);
+ return -EBUSY;
+ }
+
+ kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
+ resource_size(res));
+ if (!kproc->mem[i].cpu_addr) {
+ dev_err(dev, "failed to map %s memory\n",
+ data->mems[i].name);
+ return -ENOMEM;
+ }
+ kproc->mem[i].bus_addr = res->start;
+ kproc->mem[i].dev_addr = data->mems[i].dev_addr;
+ kproc->mem[i].size = resource_size(res);
+
+ dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ data->mems[i].name, &kproc->mem[i].bus_addr,
+ kproc->mem[i].size, kproc->mem[i].cpu_addr,
+ kproc->mem[i].dev_addr);
+ }
+ kproc->num_mems = num_mems;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(k3_rproc_of_get_memories);
+
+void k3_mem_release(void *data)
+{
+ struct device *dev = data;
+
+ of_reserved_mem_device_release(dev);
+}
+EXPORT_SYMBOL_GPL(k3_mem_release);
+
+int k3_reserved_mem_init(struct k3_rproc *kproc)
+{
+ struct device *dev = kproc->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *rmem_np;
+ struct reserved_mem *rmem;
+ int num_rmems;
+ int ret, i;
+
+ num_rmems = of_property_count_elems_of_size(np, "memory-region",
+ sizeof(phandle));
+ if (num_rmems < 0) {
+ dev_err(dev, "device does not reserved memory regions (%d)\n",
+ num_rmems);
+ return -EINVAL;
+ }
+ if (num_rmems < 2) {
+ dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
+ num_rmems);
+ return -EINVAL;
+ }
+
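+ /*
+ * A minimal (hypothetical) device-tree sketch of the expected layout,
+ * with region 0 backing the vring DMA pool and the remaining regions
+ * used as static carveouts, e.g.:
+ * memory-region = <&rproc_dma_region>, <&rproc_fw_carveout>;
+ */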
+ /* use reserved memory region 0 for vring DMA allocations */
+ ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
+ if (ret) {
+ dev_err(dev, "device cannot initialize DMA pool (%d)\n", ret);
+ return ret;
+ }
+ ret = devm_add_action_or_reset(dev, k3_mem_release, dev);
+ if (ret)
+ return ret;
+
+ num_rmems--;
+ kproc->rmem = devm_kcalloc(dev, num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
+ if (!kproc->rmem)
+ return -ENOMEM;
+
+ /* use remaining reserved memory regions for static carveouts */
+ for (i = 0; i < num_rmems; i++) {
+ rmem_np = of_parse_phandle(np, "memory-region", i + 1);
+ if (!rmem_np)
+ return -EINVAL;
+
+ rmem = of_reserved_mem_lookup(rmem_np);
+ of_node_put(rmem_np);
+ if (!rmem)
+ return -EINVAL;
+
+ kproc->rmem[i].bus_addr = rmem->base;
+ /* 64-bit address regions currently not supported */
+ kproc->rmem[i].dev_addr = (u32)rmem->base;
+ kproc->rmem[i].size = rmem->size;
+ kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
+ if (!kproc->rmem[i].cpu_addr) {
+ dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
+ i + 1, &rmem->base, &rmem->size);
+ return -ENOMEM;
+ }
+
+ dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ i + 1, &kproc->rmem[i].bus_addr,
+ kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
+ kproc->rmem[i].dev_addr);
+ }
+ kproc->num_rmems = num_rmems;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(k3_reserved_mem_init);
+
+void k3_release_tsp(void *data)
+{
+ struct ti_sci_proc *tsp = data;
+
+ ti_sci_proc_release(tsp);
+}
+EXPORT_SYMBOL_GPL(k3_release_tsp);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TI K3 common Remoteproc code");
diff --git a/drivers/remoteproc/ti_k3_common.h b/drivers/remoteproc/ti_k3_common.h
new file mode 100644
index 000000000000..aee3c28dbe51
--- /dev/null
+++ b/drivers/remoteproc/ti_k3_common.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * TI K3 Remote Processor(s) driver common code
+ *
+ * Refactored out of ti_k3_r5_remoteproc.c, ti_k3_dsp_remoteproc.c and
+ * ti_k3_m4_remoteproc.c.
+ *
+ * ti_k3_r5_remoteproc.c:
+ * Copyright (C) 2017-2022 Texas Instruments Incorporated - https://www.ti.com/
+ * Suman Anna <s-anna@ti.com>
+ *
+ * ti_k3_dsp_remoteproc.c:
+ * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
+ * Suman Anna <s-anna@ti.com>
+ *
+ * ti_k3_m4_remoteproc.c:
+ * Copyright (C) 2021-2024 Texas Instruments Incorporated - https://www.ti.com/
+ * Hari Nagalla <hnagalla@ti.com>
+ */
+
+#ifndef REMOTEPROC_TI_K3_COMMON_H
+#define REMOTEPROC_TI_K3_COMMON_H
+
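+/*
+ * Device addresses below this boundary are treated as rproc-local views of
+ * the internal RAMs; addresses at or above it are SoC-level bus addresses
+ * (see the DSP da_to_va handling).
+ */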
+#define KEYSTONE_RPROC_LOCAL_ADDRESS_MASK (SZ_16M - 1)
+
+/**
+ * struct k3_rproc_mem - internal memory structure
+ * @cpu_addr: MPU virtual address of the memory region
+ * @bus_addr: Bus address used to access the memory region
+ * @dev_addr: Device address of the memory region from remote processor view
+ * @size: Size of the memory region
+ */
+struct k3_rproc_mem {
+ void __iomem *cpu_addr;
+ phys_addr_t bus_addr;
+ u32 dev_addr;
+ size_t size;
+};
+
+/**
+ * struct k3_rproc_mem_data - memory definitions for a remote processor
+ * @name: name for this memory entry
+ * @dev_addr: device address for the memory entry
+ */
+struct k3_rproc_mem_data {
+ const char *name;
+ const u32 dev_addr;
+};
+
+/**
+ * struct k3_rproc_dev_data - device data structure for a remote processor
+ * @mems: pointer to memory definitions for a remote processor
+ * @num_mems: number of memory regions in @mems
+ * @boot_align_addr: boot vector address alignment granularity
+ * @uses_lreset: flag to denote the need for local reset management
+ */
+struct k3_rproc_dev_data {
+ const struct k3_rproc_mem_data *mems;
+ u32 num_mems;
+ u32 boot_align_addr;
+ bool uses_lreset;
+};
+
+/**
+ * struct k3_rproc - k3 remote processor driver structure
+ * @dev: cached device pointer
+ * @rproc: remoteproc device handle
+ * @mem: internal memory regions data
+ * @num_mems: number of internal memory regions
+ * @rmem: reserved memory regions data
+ * @num_rmems: number of reserved memory regions
+ * @reset: reset control handle
+ * @data: pointer to device-specific data
+ * @tsp: TI-SCI processor control handle
+ * @ti_sci: TI-SCI handle
+ * @ti_sci_id: TI-SCI device identifier
+ * @mbox: mailbox channel handle
+ * @client: mailbox client to request the mailbox channel
+ * @priv: void pointer to carry any private data
+ */
+struct k3_rproc {
+ struct device *dev;
+ struct rproc *rproc;
+ struct k3_rproc_mem *mem;
+ int num_mems;
+ struct k3_rproc_mem *rmem;
+ int num_rmems;
+ struct reset_control *reset;
+ const struct k3_rproc_dev_data *data;
+ struct ti_sci_proc *tsp;
+ const struct ti_sci_handle *ti_sci;
+ u32 ti_sci_id;
+ struct mbox_chan *mbox;
+ struct mbox_client client;
+ void *priv;
+};
+
+void k3_rproc_mbox_callback(struct mbox_client *client, void *data);
+void k3_rproc_kick(struct rproc *rproc, int vqid);
+int k3_rproc_reset(struct k3_rproc *kproc);
+int k3_rproc_release(struct k3_rproc *kproc);
+int k3_rproc_request_mbox(struct rproc *rproc);
+int k3_rproc_prepare(struct rproc *rproc);
+int k3_rproc_unprepare(struct rproc *rproc);
+int k3_rproc_start(struct rproc *rproc);
+int k3_rproc_stop(struct rproc *rproc);
+int k3_rproc_attach(struct rproc *rproc);
+int k3_rproc_detach(struct rproc *rproc);
+struct resource_table *k3_get_loaded_rsc_table(struct rproc *rproc,
+ size_t *rsc_table_sz);
+void *k3_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len,
+ bool *is_iomem);
+int k3_rproc_of_get_memories(struct platform_device *pdev,
+ struct k3_rproc *kproc);
+void k3_mem_release(void *data);
+int k3_reserved_mem_init(struct k3_rproc *kproc);
+void k3_release_tsp(void *data);
+#endif /* REMOTEPROC_TI_K3_COMMON_H */
diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
index a695890254ff..7a72933bd403 100644
--- a/drivers/remoteproc/ti_k3_dsp_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
@@ -20,291 +20,7 @@
#include "omap_remoteproc.h"
#include "remoteproc_internal.h"
#include "ti_sci_proc.h"
-
-#define KEYSTONE_RPROC_LOCAL_ADDRESS_MASK (SZ_16M - 1)
-
-/**
- * struct k3_dsp_mem - internal memory structure
- * @cpu_addr: MPU virtual address of the memory region
- * @bus_addr: Bus address used to access the memory region
- * @dev_addr: Device address of the memory region from DSP view
- * @size: Size of the memory region
- */
-struct k3_dsp_mem {
- void __iomem *cpu_addr;
- phys_addr_t bus_addr;
- u32 dev_addr;
- size_t size;
-};
-
-/**
- * struct k3_dsp_mem_data - memory definitions for a DSP
- * @name: name for this memory entry
- * @dev_addr: device address for the memory entry
- */
-struct k3_dsp_mem_data {
- const char *name;
- const u32 dev_addr;
-};
-
-/**
- * struct k3_dsp_dev_data - device data structure for a DSP
- * @mems: pointer to memory definitions for a DSP
- * @num_mems: number of memory regions in @mems
- * @boot_align_addr: boot vector address alignment granularity
- * @uses_lreset: flag to denote the need for local reset management
- */
-struct k3_dsp_dev_data {
- const struct k3_dsp_mem_data *mems;
- u32 num_mems;
- u32 boot_align_addr;
- bool uses_lreset;
-};
-
-/**
- * struct k3_dsp_rproc - k3 DSP remote processor driver structure
- * @dev: cached device pointer
- * @rproc: remoteproc device handle
- * @mem: internal memory regions data
- * @num_mems: number of internal memory regions
- * @rmem: reserved memory regions data
- * @num_rmems: number of reserved memory regions
- * @reset: reset control handle
- * @data: pointer to DSP-specific device data
- * @tsp: TI-SCI processor control handle
- * @ti_sci: TI-SCI handle
- * @ti_sci_id: TI-SCI device identifier
- * @mbox: mailbox channel handle
- * @client: mailbox client to request the mailbox channel
- */
-struct k3_dsp_rproc {
- struct device *dev;
- struct rproc *rproc;
- struct k3_dsp_mem *mem;
- int num_mems;
- struct k3_dsp_mem *rmem;
- int num_rmems;
- struct reset_control *reset;
- const struct k3_dsp_dev_data *data;
- struct ti_sci_proc *tsp;
- const struct ti_sci_handle *ti_sci;
- u32 ti_sci_id;
- struct mbox_chan *mbox;
- struct mbox_client client;
-};
-
-/**
- * k3_dsp_rproc_mbox_callback() - inbound mailbox message handler
- * @client: mailbox client pointer used for requesting the mailbox channel
- * @data: mailbox payload
- *
- * This handler is invoked by the OMAP mailbox driver whenever a mailbox
- * message is received. Usually, the mailbox payload simply contains
- * the index of the virtqueue that is kicked by the remote processor,
- * and we let remoteproc core handle it.
- *
- * In addition to virtqueue indices, we also have some out-of-band values
- * that indicate different events. Those values are deliberately very
- * large so they don't coincide with virtqueue indices.
- */
-static void k3_dsp_rproc_mbox_callback(struct mbox_client *client, void *data)
-{
- struct k3_dsp_rproc *kproc = container_of(client, struct k3_dsp_rproc,
- client);
- struct device *dev = kproc->rproc->dev.parent;
- const char *name = kproc->rproc->name;
- u32 msg = omap_mbox_message(data);
-
- /* Do not forward messages from a detached core */
- if (kproc->rproc->state == RPROC_DETACHED)
- return;
-
- dev_dbg(dev, "mbox msg: 0x%x\n", msg);
-
- switch (msg) {
- case RP_MBOX_CRASH:
- /*
- * remoteproc detected an exception, but error recovery is not
- * supported. So, just log this for now
- */
- dev_err(dev, "K3 DSP rproc %s crashed\n", name);
- break;
- case RP_MBOX_ECHO_REPLY:
- dev_info(dev, "received echo reply from %s\n", name);
- break;
- default:
- /* silently handle all other valid messages */
- if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
- return;
- if (msg > kproc->rproc->max_notifyid) {
- dev_dbg(dev, "dropping unknown message 0x%x", msg);
- return;
- }
- /* msg contains the index of the triggered vring */
- if (rproc_vq_interrupt(kproc->rproc, msg) == IRQ_NONE)
- dev_dbg(dev, "no message was found in vqid %d\n", msg);
- }
-}
-
-/*
- * Kick the remote processor to notify about pending unprocessed messages.
- * The vqid usage is not used and is inconsequential, as the kick is performed
- * through a simulated GPIO (a bit in an IPC interrupt-triggering register),
- * the remote processor is expected to process both its Tx and Rx virtqueues.
- */
-static void k3_dsp_rproc_kick(struct rproc *rproc, int vqid)
-{
- struct k3_dsp_rproc *kproc = rproc->priv;
- struct device *dev = rproc->dev.parent;
- mbox_msg_t msg = (mbox_msg_t)vqid;
- int ret;
-
- /* Do not forward messages to a detached core */
- if (kproc->rproc->state == RPROC_DETACHED)
- return;
-
- /* send the index of the triggered virtqueue in the mailbox payload */
- ret = mbox_send_message(kproc->mbox, (void *)msg);
- if (ret < 0)
- dev_err(dev, "failed to send mailbox message (%pe)\n",
- ERR_PTR(ret));
-}
-
-/* Put the DSP processor into reset */
-static int k3_dsp_rproc_reset(struct k3_dsp_rproc *kproc)
-{
- struct device *dev = kproc->dev;
- int ret;
-
- ret = reset_control_assert(kproc->reset);
- if (ret) {
- dev_err(dev, "local-reset assert failed (%pe)\n", ERR_PTR(ret));
- return ret;
- }
-
- if (kproc->data->uses_lreset)
- return ret;
-
- ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
- kproc->ti_sci_id);
- if (ret) {
- dev_err(dev, "module-reset assert failed (%pe)\n", ERR_PTR(ret));
- if (reset_control_deassert(kproc->reset))
- dev_warn(dev, "local-reset deassert back failed\n");
- }
-
- return ret;
-}
-
-/* Release the DSP processor from reset */
-static int k3_dsp_rproc_release(struct k3_dsp_rproc *kproc)
-{
- struct device *dev = kproc->dev;
- int ret;
-
- if (kproc->data->uses_lreset)
- goto lreset;
-
- ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
- kproc->ti_sci_id);
- if (ret) {
- dev_err(dev, "module-reset deassert failed (%pe)\n", ERR_PTR(ret));
- return ret;
- }
-
-lreset:
- ret = reset_control_deassert(kproc->reset);
- if (ret) {
- dev_err(dev, "local-reset deassert failed, (%pe)\n", ERR_PTR(ret));
- if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
- kproc->ti_sci_id))
- dev_warn(dev, "module-reset assert back failed\n");
- }
-
- return ret;
-}
-
-static int k3_dsp_rproc_request_mbox(struct rproc *rproc)
-{
- struct k3_dsp_rproc *kproc = rproc->priv;
- struct mbox_client *client = &kproc->client;
- struct device *dev = kproc->dev;
- int ret;
-
- client->dev = dev;
- client->tx_done = NULL;
- client->rx_callback = k3_dsp_rproc_mbox_callback;
- client->tx_block = false;
- client->knows_txdone = false;
-
- kproc->mbox = mbox_request_channel(client, 0);
- if (IS_ERR(kproc->mbox))
- return dev_err_probe(dev, PTR_ERR(kproc->mbox),
- "mbox_request_channel failed\n");
-
- /*
- * Ping the remote processor, this is only for sanity-sake for now;
- * there is no functional effect whatsoever.
- *
- * Note that the reply will _not_ arrive immediately: this message
- * will wait in the mailbox fifo until the remote processor is booted.
- */
- ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
- if (ret < 0) {
- dev_err(dev, "mbox_send_message failed (%pe)\n", ERR_PTR(ret));
- mbox_free_channel(kproc->mbox);
- return ret;
- }
-
- return 0;
-}
-/*
- * The C66x DSP cores have a local reset that affects only the CPU, and a
- * generic module reset that powers on the device and allows the DSP internal
- * memories to be accessed while the local reset is asserted. This function is
- * used to release the global reset on C66x DSPs to allow loading into the DSP
- * internal RAMs. The .prepare() ops is invoked by remoteproc core before any
- * firmware loading, and is followed by the .start() ops after loading to
- * actually let the C66x DSP cores run. This callback is invoked only in
- * remoteproc mode.
- */
-static int k3_dsp_rproc_prepare(struct rproc *rproc)
-{
- struct k3_dsp_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
- int ret;
-
- ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
- kproc->ti_sci_id);
- if (ret)
- dev_err(dev, "module-reset deassert failed, cannot enable internal RAM loading (%pe)\n",
- ERR_PTR(ret));
-
- return ret;
-}
-
-/*
- * This function implements the .unprepare() ops and performs the complimentary
- * operations to that of the .prepare() ops. The function is used to assert the
- * global reset on applicable C66x cores. This completes the second portion of
- * powering down the C66x DSP cores. The cores themselves are only halted in the
- * .stop() callback through the local reset, and the .unprepare() ops is invoked
- * by the remoteproc core after the remoteproc is stopped to balance the global
- * reset. This callback is invoked only in remoteproc mode.
- */
-static int k3_dsp_rproc_unprepare(struct rproc *rproc)
-{
- struct k3_dsp_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
- int ret;
-
- ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
- kproc->ti_sci_id);
- if (ret)
- dev_err(dev, "module-reset assert failed (%pe)\n", ERR_PTR(ret));
-
- return ret;
-}
+#include "ti_k3_common.h"
/*
* Power up the DSP remote processor.
@@ -315,7 +31,7 @@ static int k3_dsp_rproc_unprepare(struct rproc *rproc)
*/
static int k3_dsp_rproc_start(struct rproc *rproc)
{
- struct k3_dsp_rproc *kproc = rproc->priv;
+ struct k3_rproc *kproc = rproc->priv;
struct device *dev = kproc->dev;
u32 boot_addr;
int ret;
@@ -332,288 +48,30 @@ static int k3_dsp_rproc_start(struct rproc *rproc)
if (ret)
return ret;
- ret = k3_dsp_rproc_release(kproc);
+ /* Call the common K3 start function after the DSP-specific setup is done */
+ ret = k3_rproc_start(rproc);
if (ret)
return ret;
return 0;
}
-/*
- * Stop the DSP remote processor.
- *
- * This function puts the DSP processor into reset, and finishes processing
- * of any pending messages. This callback is invoked only in remoteproc mode.
- */
-static int k3_dsp_rproc_stop(struct rproc *rproc)
-{
- struct k3_dsp_rproc *kproc = rproc->priv;
-
- k3_dsp_rproc_reset(kproc);
-
- return 0;
-}
-
-/*
- * Attach to a running DSP remote processor (IPC-only mode)
- *
- * This rproc attach callback is a NOP. The remote processor is already booted,
- * and all required resources have been acquired during probe routine, so there
- * is no need to issue any TI-SCI commands to boot the DSP core. This callback
- * is invoked only in IPC-only mode and exists because rproc_validate() checks
- * for its existence.
- */
-static int k3_dsp_rproc_attach(struct rproc *rproc) { return 0; }
-
-/*
- * Detach from a running DSP remote processor (IPC-only mode)
- *
- * This rproc detach callback is a NOP. The DSP core is not stopped and will be
- * left to continue to run its booted firmware. This callback is invoked only in
- * IPC-only mode and exists for sanity sake.
- */
-static int k3_dsp_rproc_detach(struct rproc *rproc) { return 0; }
-
-/*
- * This function implements the .get_loaded_rsc_table() callback and is used
- * to provide the resource table for a booted DSP in IPC-only mode. The K3 DSP
- * firmwares follow a design-by-contract approach and are expected to have the
- * resource table at the base of the DDR region reserved for firmware usage.
- * This provides flexibility for the remote processor to be booted by different
- * bootloaders that may or may not have the ability to publish the resource table
- * address and size through a DT property. This callback is invoked only in
- * IPC-only mode.
- */
-static struct resource_table *k3_dsp_get_loaded_rsc_table(struct rproc *rproc,
- size_t *rsc_table_sz)
-{
- struct k3_dsp_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
-
- if (!kproc->rmem[0].cpu_addr) {
- dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found");
- return ERR_PTR(-ENOMEM);
- }
-
- /*
- * NOTE: The resource table size is currently hard-coded to a maximum
- * of 256 bytes. The most common resource table usage for K3 firmwares
- * is to only have the vdev resource entry and an optional trace entry.
- * The exact size could be computed based on resource table address, but
- * the hard-coded value suffices to support the IPC-only mode.
- */
- *rsc_table_sz = 256;
- return (__force struct resource_table *)kproc->rmem[0].cpu_addr;
-}
-
-/*
- * Custom function to translate a DSP device address (internal RAMs only) to a
- * kernel virtual address. The DSPs can access their RAMs at either an internal
- * address visible only from a DSP, or at the SoC-level bus address. Both these
- * addresses need to be looked through for translation. The translated addresses
- * can be used either by the remoteproc core for loading (when using kernel
- * remoteproc loader), or by any rpmsg bus drivers.
- */
-static void *k3_dsp_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
-{
- struct k3_dsp_rproc *kproc = rproc->priv;
- void __iomem *va = NULL;
- phys_addr_t bus_addr;
- u32 dev_addr, offset;
- size_t size;
- int i;
-
- if (len == 0)
- return NULL;
-
- for (i = 0; i < kproc->num_mems; i++) {
- bus_addr = kproc->mem[i].bus_addr;
- dev_addr = kproc->mem[i].dev_addr;
- size = kproc->mem[i].size;
-
- if (da < KEYSTONE_RPROC_LOCAL_ADDRESS_MASK) {
- /* handle DSP-view addresses */
- if (da >= dev_addr &&
- ((da + len) <= (dev_addr + size))) {
- offset = da - dev_addr;
- va = kproc->mem[i].cpu_addr + offset;
- return (__force void *)va;
- }
- } else {
- /* handle SoC-view addresses */
- if (da >= bus_addr &&
- (da + len) <= (bus_addr + size)) {
- offset = da - bus_addr;
- va = kproc->mem[i].cpu_addr + offset;
- return (__force void *)va;
- }
- }
- }
-
- /* handle static DDR reserved memory regions */
- for (i = 0; i < kproc->num_rmems; i++) {
- dev_addr = kproc->rmem[i].dev_addr;
- size = kproc->rmem[i].size;
-
- if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
- offset = da - dev_addr;
- va = kproc->rmem[i].cpu_addr + offset;
- return (__force void *)va;
- }
- }
-
- return NULL;
-}
-
static const struct rproc_ops k3_dsp_rproc_ops = {
- .start = k3_dsp_rproc_start,
- .stop = k3_dsp_rproc_stop,
- .kick = k3_dsp_rproc_kick,
- .da_to_va = k3_dsp_rproc_da_to_va,
+ .start = k3_dsp_rproc_start,
+ .stop = k3_rproc_stop,
+ .attach = k3_rproc_attach,
+ .detach = k3_rproc_detach,
+ .kick = k3_rproc_kick,
+ .da_to_va = k3_rproc_da_to_va,
+ .get_loaded_rsc_table = k3_get_loaded_rsc_table,
};
-static int k3_dsp_rproc_of_get_memories(struct platform_device *pdev,
- struct k3_dsp_rproc *kproc)
-{
- const struct k3_dsp_dev_data *data = kproc->data;
- struct device *dev = &pdev->dev;
- struct resource *res;
- int num_mems = 0;
- int i;
-
- num_mems = kproc->data->num_mems;
- kproc->mem = devm_kcalloc(kproc->dev, num_mems,
- sizeof(*kproc->mem), GFP_KERNEL);
- if (!kproc->mem)
- return -ENOMEM;
-
- for (i = 0; i < num_mems; i++) {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- data->mems[i].name);
- if (!res) {
- dev_err(dev, "found no memory resource for %s\n",
- data->mems[i].name);
- return -EINVAL;
- }
- if (!devm_request_mem_region(dev, res->start,
- resource_size(res),
- dev_name(dev))) {
- dev_err(dev, "could not request %s region for resource\n",
- data->mems[i].name);
- return -EBUSY;
- }
-
- kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
- resource_size(res));
- if (!kproc->mem[i].cpu_addr) {
- dev_err(dev, "failed to map %s memory\n",
- data->mems[i].name);
- return -ENOMEM;
- }
- kproc->mem[i].bus_addr = res->start;
- kproc->mem[i].dev_addr = data->mems[i].dev_addr;
- kproc->mem[i].size = resource_size(res);
-
- dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
- data->mems[i].name, &kproc->mem[i].bus_addr,
- kproc->mem[i].size, kproc->mem[i].cpu_addr,
- kproc->mem[i].dev_addr);
- }
- kproc->num_mems = num_mems;
-
- return 0;
-}
-
-static void k3_dsp_mem_release(void *data)
-{
- struct device *dev = data;
-
- of_reserved_mem_device_release(dev);
-}
-
-static int k3_dsp_reserved_mem_init(struct k3_dsp_rproc *kproc)
-{
- struct device *dev = kproc->dev;
- struct device_node *np = dev->of_node;
- struct device_node *rmem_np;
- struct reserved_mem *rmem;
- int num_rmems;
- int ret, i;
-
- num_rmems = of_property_count_elems_of_size(np, "memory-region",
- sizeof(phandle));
- if (num_rmems < 0) {
- dev_err(dev, "device does not reserved memory regions (%pe)\n",
- ERR_PTR(num_rmems));
- return -EINVAL;
- }
- if (num_rmems < 2) {
- dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
- num_rmems);
- return -EINVAL;
- }
-
- /* use reserved memory region 0 for vring DMA allocations */
- ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
- if (ret) {
- dev_err(dev, "device cannot initialize DMA pool (%pe)\n",
- ERR_PTR(ret));
- return ret;
- }
- ret = devm_add_action_or_reset(dev, k3_dsp_mem_release, dev);
- if (ret)
- return ret;
-
- num_rmems--;
- kproc->rmem = devm_kcalloc(dev, num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
- if (!kproc->rmem)
- return -ENOMEM;
-
- /* use remaining reserved memory regions for static carveouts */
- for (i = 0; i < num_rmems; i++) {
- rmem_np = of_parse_phandle(np, "memory-region", i + 1);
- if (!rmem_np)
- return -EINVAL;
-
- rmem = of_reserved_mem_lookup(rmem_np);
- of_node_put(rmem_np);
- if (!rmem)
- return -EINVAL;
-
- kproc->rmem[i].bus_addr = rmem->base;
- /* 64-bit address regions currently not supported */
- kproc->rmem[i].dev_addr = (u32)rmem->base;
- kproc->rmem[i].size = rmem->size;
- kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
- if (!kproc->rmem[i].cpu_addr) {
- dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
- i + 1, &rmem->base, &rmem->size);
- return -ENOMEM;
- }
-
- dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
- i + 1, &kproc->rmem[i].bus_addr,
- kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
- kproc->rmem[i].dev_addr);
- }
- kproc->num_rmems = num_rmems;
-
- return 0;
-}
-
-static void k3_dsp_release_tsp(void *data)
-{
- struct ti_sci_proc *tsp = data;
-
- ti_sci_proc_release(tsp);
-}
-
static int k3_dsp_rproc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- const struct k3_dsp_dev_data *data;
- struct k3_dsp_rproc *kproc;
+ const struct k3_rproc_dev_data *data;
+ struct k3_rproc *kproc;
struct rproc *rproc;
const char *fw_name;
bool p_state = false;
@@ -635,15 +93,15 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
rproc->has_iommu = false;
rproc->recovery_disabled = true;
if (data->uses_lreset) {
- rproc->ops->prepare = k3_dsp_rproc_prepare;
- rproc->ops->unprepare = k3_dsp_rproc_unprepare;
+ rproc->ops->prepare = k3_rproc_prepare;
+ rproc->ops->unprepare = k3_rproc_unprepare;
}
kproc = rproc->priv;
kproc->rproc = rproc;
kproc->dev = dev;
kproc->data = data;
- ret = k3_dsp_rproc_request_mbox(rproc);
+ ret = k3_rproc_request_mbox(rproc);
if (ret)
return ret;
@@ -671,15 +129,15 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
dev_err_probe(dev, ret, "ti_sci_proc_request failed\n");
return ret;
}
- ret = devm_add_action_or_reset(dev, k3_dsp_release_tsp, kproc->tsp);
+ ret = devm_add_action_or_reset(dev, k3_release_tsp, kproc->tsp);
if (ret)
return ret;
- ret = k3_dsp_rproc_of_get_memories(pdev, kproc);
+ ret = k3_rproc_of_get_memories(pdev, kproc);
if (ret)
return ret;
- ret = k3_dsp_reserved_mem_init(kproc);
+ ret = k3_reserved_mem_init(kproc);
if (ret)
return dev_err_probe(dev, ret, "reserved memory init failed\n");
@@ -692,30 +150,8 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
if (p_state) {
dev_info(dev, "configured DSP for IPC-only mode\n");
rproc->state = RPROC_DETACHED;
- /* override rproc ops with only required IPC-only mode ops */
- rproc->ops->prepare = NULL;
- rproc->ops->unprepare = NULL;
- rproc->ops->start = NULL;
- rproc->ops->stop = NULL;
- rproc->ops->attach = k3_dsp_rproc_attach;
- rproc->ops->detach = k3_dsp_rproc_detach;
- rproc->ops->get_loaded_rsc_table = k3_dsp_get_loaded_rsc_table;
} else {
dev_info(dev, "configured DSP for remoteproc mode\n");
- /*
- * ensure the DSP local reset is asserted to ensure the DSP
- * doesn't execute bogus code in .prepare() when the module
- * reset is released.
- */
- if (data->uses_lreset) {
- ret = reset_control_status(kproc->reset);
- if (ret < 0) {
- return dev_err_probe(dev, ret, "failed to get reset status\n");
- } else if (ret == 0) {
- dev_warn(dev, "local reset is deasserted for device\n");
- k3_dsp_rproc_reset(kproc);
- }
- }
}
ret = devm_rproc_add(dev, rproc);
@@ -729,7 +165,7 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
static void k3_dsp_rproc_remove(struct platform_device *pdev)
{
- struct k3_dsp_rproc *kproc = platform_get_drvdata(pdev);
+ struct k3_rproc *kproc = platform_get_drvdata(pdev);
struct rproc *rproc = kproc->rproc;
struct device *dev = &pdev->dev;
int ret;
@@ -743,37 +179,37 @@ static void k3_dsp_rproc_remove(struct platform_device *pdev)
mbox_free_channel(kproc->mbox);
}
-static const struct k3_dsp_mem_data c66_mems[] = {
+static const struct k3_rproc_mem_data c66_mems[] = {
{ .name = "l2sram", .dev_addr = 0x800000 },
{ .name = "l1pram", .dev_addr = 0xe00000 },
{ .name = "l1dram", .dev_addr = 0xf00000 },
};
/* C71x cores only have a L1P Cache, there are no L1P SRAMs */
-static const struct k3_dsp_mem_data c71_mems[] = {
+static const struct k3_rproc_mem_data c71_mems[] = {
{ .name = "l2sram", .dev_addr = 0x800000 },
{ .name = "l1dram", .dev_addr = 0xe00000 },
};
-static const struct k3_dsp_mem_data c7xv_mems[] = {
+static const struct k3_rproc_mem_data c7xv_mems[] = {
{ .name = "l2sram", .dev_addr = 0x800000 },
};
-static const struct k3_dsp_dev_data c66_data = {
+static const struct k3_rproc_dev_data c66_data = {
.mems = c66_mems,
.num_mems = ARRAY_SIZE(c66_mems),
.boot_align_addr = SZ_1K,
.uses_lreset = true,
};
-static const struct k3_dsp_dev_data c71_data = {
+static const struct k3_rproc_dev_data c71_data = {
.mems = c71_mems,
.num_mems = ARRAY_SIZE(c71_mems),
.boot_align_addr = SZ_2M,
.uses_lreset = false,
};
-static const struct k3_dsp_dev_data c7xv_data = {
+static const struct k3_rproc_dev_data c7xv_data = {
.mems = c7xv_mems,
.num_mems = ARRAY_SIZE(c7xv_mems),
.boot_align_addr = SZ_2M,
diff --git a/drivers/remoteproc/ti_k3_m4_remoteproc.c b/drivers/remoteproc/ti_k3_m4_remoteproc.c
index a16fb165fced..3a11fd24eb52 100644
--- a/drivers/remoteproc/ti_k3_m4_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_m4_remoteproc.c
@@ -19,552 +19,35 @@
#include "omap_remoteproc.h"
#include "remoteproc_internal.h"
#include "ti_sci_proc.h"
-
-#define K3_M4_IRAM_DEV_ADDR 0x00000
-#define K3_M4_DRAM_DEV_ADDR 0x30000
-
-/**
- * struct k3_m4_rproc_mem - internal memory structure
- * @cpu_addr: MPU virtual address of the memory region
- * @bus_addr: Bus address used to access the memory region
- * @dev_addr: Device address of the memory region from remote processor view
- * @size: Size of the memory region
- */
-struct k3_m4_rproc_mem {
- void __iomem *cpu_addr;
- phys_addr_t bus_addr;
- u32 dev_addr;
- size_t size;
-};
-
-/**
- * struct k3_m4_rproc_mem_data - memory definitions for a remote processor
- * @name: name for this memory entry
- * @dev_addr: device address for the memory entry
- */
-struct k3_m4_rproc_mem_data {
- const char *name;
- const u32 dev_addr;
-};
-
-/**
- * struct k3_m4_rproc - k3 remote processor driver structure
- * @dev: cached device pointer
- * @mem: internal memory regions data
- * @num_mems: number of internal memory regions
- * @rmem: reserved memory regions data
- * @num_rmems: number of reserved memory regions
- * @reset: reset control handle
- * @tsp: TI-SCI processor control handle
- * @ti_sci: TI-SCI handle
- * @ti_sci_id: TI-SCI device identifier
- * @mbox: mailbox channel handle
- * @client: mailbox client to request the mailbox channel
- */
-struct k3_m4_rproc {
- struct device *dev;
- struct k3_m4_rproc_mem *mem;
- int num_mems;
- struct k3_m4_rproc_mem *rmem;
- int num_rmems;
- struct reset_control *reset;
- struct ti_sci_proc *tsp;
- const struct ti_sci_handle *ti_sci;
- u32 ti_sci_id;
- struct mbox_chan *mbox;
- struct mbox_client client;
-};
-
-/**
- * k3_m4_rproc_mbox_callback() - inbound mailbox message handler
- * @client: mailbox client pointer used for requesting the mailbox channel
- * @data: mailbox payload
- *
- * This handler is invoked by the K3 mailbox driver whenever a mailbox
- * message is received. Usually, the mailbox payload simply contains
- * the index of the virtqueue that is kicked by the remote processor,
- * and we let remoteproc core handle it.
- *
- * In addition to virtqueue indices, we also have some out-of-band values
- * that indicate different events. Those values are deliberately very
- * large so they don't coincide with virtqueue indices.
- */
-static void k3_m4_rproc_mbox_callback(struct mbox_client *client, void *data)
-{
- struct device *dev = client->dev;
- struct rproc *rproc = dev_get_drvdata(dev);
- u32 msg = (u32)(uintptr_t)(data);
-
- dev_dbg(dev, "mbox msg: 0x%x\n", msg);
-
- switch (msg) {
- case RP_MBOX_CRASH:
- /*
- * remoteproc detected an exception, but error recovery is not
- * supported. So, just log this for now
- */
- dev_err(dev, "K3 rproc %s crashed\n", rproc->name);
- break;
- case RP_MBOX_ECHO_REPLY:
- dev_info(dev, "received echo reply from %s\n", rproc->name);
- break;
- default:
- /* silently handle all other valid messages */
- if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
- return;
- if (msg > rproc->max_notifyid) {
- dev_dbg(dev, "dropping unknown message 0x%x", msg);
- return;
- }
- /* msg contains the index of the triggered vring */
- if (rproc_vq_interrupt(rproc, msg) == IRQ_NONE)
- dev_dbg(dev, "no message was found in vqid %d\n", msg);
- }
-}
-
-/*
- * Kick the remote processor to notify about pending unprocessed messages.
- * The vqid usage is not used and is inconsequential, as the kick is performed
- * through a simulated GPIO (a bit in an IPC interrupt-triggering register),
- * the remote processor is expected to process both its Tx and Rx virtqueues.
- */
-static void k3_m4_rproc_kick(struct rproc *rproc, int vqid)
-{
- struct k3_m4_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
- u32 msg = (u32)vqid;
- int ret;
-
- /*
- * Send the index of the triggered virtqueue in the mailbox payload.
- * NOTE: msg is cast to uintptr_t to prevent compiler warnings when
- * void* is 64bit. It is safely cast back to u32 in the mailbox driver.
- */
- ret = mbox_send_message(kproc->mbox, (void *)(uintptr_t)msg);
- if (ret < 0)
- dev_err(dev, "failed to send mailbox message, status = %d\n",
- ret);
-}
-
-static int k3_m4_rproc_ping_mbox(struct k3_m4_rproc *kproc)
-{
- struct device *dev = kproc->dev;
- int ret;
-
- /*
- * Ping the remote processor, this is only for sanity-sake for now;
- * there is no functional effect whatsoever.
- *
- * Note that the reply will _not_ arrive immediately: this message
- * will wait in the mailbox fifo until the remote processor is booted.
- */
- ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
- if (ret < 0) {
- dev_err(dev, "mbox_send_message failed: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-/*
- * The M4 cores have a local reset that affects only the CPU, and a
- * generic module reset that powers on the device and allows the internal
- * memories to be accessed while the local reset is asserted. This function is
- * used to release the global reset on remote cores to allow loading into the
- * internal RAMs. The .prepare() ops is invoked by remoteproc core before any
- * firmware loading, and is followed by the .start() ops after loading to
- * actually let the remote cores to run.
- */
-static int k3_m4_rproc_prepare(struct rproc *rproc)
-{
- struct k3_m4_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
- int ret;
-
- /* If the core is running already no need to deassert the module reset */
- if (rproc->state == RPROC_DETACHED)
- return 0;
-
- /*
- * Ensure the local reset is asserted so the core doesn't
- * execute bogus code when the module reset is released.
- */
- ret = reset_control_assert(kproc->reset);
- if (ret) {
- dev_err(dev, "could not assert local reset\n");
- return ret;
- }
-
- ret = reset_control_status(kproc->reset);
- if (ret <= 0) {
- dev_err(dev, "local reset still not asserted\n");
- return ret;
- }
-
- ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
- kproc->ti_sci_id);
- if (ret) {
- dev_err(dev, "could not deassert module-reset for internal RAM loading\n");
- return ret;
- }
-
- return 0;
-}
-
-/*
- * This function implements the .unprepare() ops and performs the complimentary
- * operations to that of the .prepare() ops. The function is used to assert the
- * global reset on applicable cores. This completes the second portion of
- * powering down the remote core. The cores themselves are only halted in the
- * .stop() callback through the local reset, and the .unprepare() ops is invoked
- * by the remoteproc core after the remoteproc is stopped to balance the global
- * reset.
- */
-static int k3_m4_rproc_unprepare(struct rproc *rproc)
-{
- struct k3_m4_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
- int ret;
-
- /* If the core is going to be detached do not assert the module reset */
- if (rproc->state == RPROC_ATTACHED)
- return 0;
-
- ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
- kproc->ti_sci_id);
- if (ret) {
- dev_err(dev, "module-reset assert failed\n");
- return ret;
- }
-
- return 0;
-}
-
-/*
- * This function implements the .get_loaded_rsc_table() callback and is used
- * to provide the resource table for a booted remote processor in IPC-only
- * mode. The remote processor firmwares follow a design-by-contract approach
- * and are expected to have the resource table at the base of the DDR region
- * reserved for firmware usage. This provides flexibility for the remote
- * processor to be booted by different bootloaders that may or may not have the
- * ability to publish the resource table address and size through a DT
- * property.
- */
-static struct resource_table *k3_m4_get_loaded_rsc_table(struct rproc *rproc,
- size_t *rsc_table_sz)
-{
- struct k3_m4_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
-
- if (!kproc->rmem[0].cpu_addr) {
- dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found");
- return ERR_PTR(-ENOMEM);
- }
-
- /*
- * NOTE: The resource table size is currently hard-coded to a maximum
- * of 256 bytes. The most common resource table usage for K3 firmwares
- * is to only have the vdev resource entry and an optional trace entry.
- * The exact size could be computed based on resource table address, but
- * the hard-coded value suffices to support the IPC-only mode.
- */
- *rsc_table_sz = 256;
- return (__force struct resource_table *)kproc->rmem[0].cpu_addr;
-}
-
-/*
- * Custom function to translate a remote processor device address (internal
- * RAMs only) to a kernel virtual address. The remote processors can access
- * their RAMs at either an internal address visible only from a remote
- * processor, or at the SoC-level bus address. Both these addresses need to be
- * looked through for translation. The translated addresses can be used either
- * by the remoteproc core for loading (when using kernel remoteproc loader), or
- * by any rpmsg bus drivers.
- */
-static void *k3_m4_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
-{
- struct k3_m4_rproc *kproc = rproc->priv;
- void __iomem *va = NULL;
- phys_addr_t bus_addr;
- u32 dev_addr, offset;
- size_t size;
- int i;
-
- if (len == 0)
- return NULL;
-
- for (i = 0; i < kproc->num_mems; i++) {
- bus_addr = kproc->mem[i].bus_addr;
- dev_addr = kproc->mem[i].dev_addr;
- size = kproc->mem[i].size;
-
- /* handle M4-view addresses */
- if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
- offset = da - dev_addr;
- va = kproc->mem[i].cpu_addr + offset;
- return (__force void *)va;
- }
-
- /* handle SoC-view addresses */
- if (da >= bus_addr && ((da + len) <= (bus_addr + size))) {
- offset = da - bus_addr;
- va = kproc->mem[i].cpu_addr + offset;
- return (__force void *)va;
- }
- }
-
- /* handle static DDR reserved memory regions */
- for (i = 0; i < kproc->num_rmems; i++) {
- dev_addr = kproc->rmem[i].dev_addr;
- size = kproc->rmem[i].size;
-
- if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
- offset = da - dev_addr;
- va = kproc->rmem[i].cpu_addr + offset;
- return (__force void *)va;
- }
- }
-
- return NULL;
-}
-
-static int k3_m4_rproc_of_get_memories(struct platform_device *pdev,
- struct k3_m4_rproc *kproc)
-{
- static const char * const mem_names[] = { "iram", "dram" };
- static const u32 mem_addrs[] = { K3_M4_IRAM_DEV_ADDR, K3_M4_DRAM_DEV_ADDR };
- struct device *dev = &pdev->dev;
- struct resource *res;
- int num_mems;
- int i;
-
- num_mems = ARRAY_SIZE(mem_names);
- kproc->mem = devm_kcalloc(kproc->dev, num_mems,
- sizeof(*kproc->mem), GFP_KERNEL);
- if (!kproc->mem)
- return -ENOMEM;
-
- for (i = 0; i < num_mems; i++) {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- mem_names[i]);
- if (!res) {
- dev_err(dev, "found no memory resource for %s\n",
- mem_names[i]);
- return -EINVAL;
- }
- if (!devm_request_mem_region(dev, res->start,
- resource_size(res),
- dev_name(dev))) {
- dev_err(dev, "could not request %s region for resource\n",
- mem_names[i]);
- return -EBUSY;
- }
-
- kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
- resource_size(res));
- if (!kproc->mem[i].cpu_addr) {
- dev_err(dev, "failed to map %s memory\n",
- mem_names[i]);
- return -ENOMEM;
- }
- kproc->mem[i].bus_addr = res->start;
- kproc->mem[i].dev_addr = mem_addrs[i];
- kproc->mem[i].size = resource_size(res);
-
- dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
- mem_names[i], &kproc->mem[i].bus_addr,
- kproc->mem[i].size, kproc->mem[i].cpu_addr,
- kproc->mem[i].dev_addr);
- }
- kproc->num_mems = num_mems;
-
- return 0;
-}
-
-static void k3_m4_rproc_dev_mem_release(void *data)
-{
- struct device *dev = data;
-
- of_reserved_mem_device_release(dev);
-}
-
-static int k3_m4_reserved_mem_init(struct k3_m4_rproc *kproc)
-{
- struct device *dev = kproc->dev;
- struct device_node *np = dev->of_node;
- struct device_node *rmem_np;
- struct reserved_mem *rmem;
- int num_rmems;
- int ret, i;
-
- num_rmems = of_property_count_elems_of_size(np, "memory-region",
- sizeof(phandle));
- if (num_rmems < 0) {
- dev_err(dev, "device does not reserved memory regions (%d)\n",
- num_rmems);
- return -EINVAL;
- }
- if (num_rmems < 2) {
- dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
- num_rmems);
- return -EINVAL;
- }
-
- /* use reserved memory region 0 for vring DMA allocations */
- ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
- if (ret) {
- dev_err(dev, "device cannot initialize DMA pool (%d)\n", ret);
- return ret;
- }
- ret = devm_add_action_or_reset(dev, k3_m4_rproc_dev_mem_release, dev);
- if (ret)
- return ret;
-
- num_rmems--;
- kproc->rmem = devm_kcalloc(dev, num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
- if (!kproc->rmem)
- return -ENOMEM;
-
- /* use remaining reserved memory regions for static carveouts */
- for (i = 0; i < num_rmems; i++) {
- rmem_np = of_parse_phandle(np, "memory-region", i + 1);
- if (!rmem_np)
- return -EINVAL;
-
- rmem = of_reserved_mem_lookup(rmem_np);
- of_node_put(rmem_np);
- if (!rmem)
- return -EINVAL;
-
- kproc->rmem[i].bus_addr = rmem->base;
- /* 64-bit address regions currently not supported */
- kproc->rmem[i].dev_addr = (u32)rmem->base;
- kproc->rmem[i].size = rmem->size;
- kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
- if (!kproc->rmem[i].cpu_addr) {
- dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
- i + 1, &rmem->base, &rmem->size);
- return -ENOMEM;
- }
-
- dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
- i + 1, &kproc->rmem[i].bus_addr,
- kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
- kproc->rmem[i].dev_addr);
- }
- kproc->num_rmems = num_rmems;
-
- return 0;
-}
-
-static void k3_m4_release_tsp(void *data)
-{
- struct ti_sci_proc *tsp = data;
-
- ti_sci_proc_release(tsp);
-}
-
-/*
- * Power up the M4 remote processor.
- *
- * This function will be invoked only after the firmware for this rproc
- * was loaded, parsed successfully, and all of its resource requirements
- * were met. This callback is invoked only in remoteproc mode.
- */
-static int k3_m4_rproc_start(struct rproc *rproc)
-{
- struct k3_m4_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
- int ret;
-
- ret = k3_m4_rproc_ping_mbox(kproc);
- if (ret)
- return ret;
-
- ret = reset_control_deassert(kproc->reset);
- if (ret) {
- dev_err(dev, "local-reset deassert failed, ret = %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-/*
- * Stop the M4 remote processor.
- *
- * This function puts the M4 processor into reset, and finishes processing
- * of any pending messages. This callback is invoked only in remoteproc mode.
- */
-static int k3_m4_rproc_stop(struct rproc *rproc)
-{
- struct k3_m4_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
- int ret;
-
- ret = reset_control_assert(kproc->reset);
- if (ret) {
- dev_err(dev, "local-reset assert failed, ret = %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-/*
- * Attach to a running M4 remote processor (IPC-only mode)
- *
- * The remote processor is already booted, so there is no need to issue any
- * TI-SCI commands to boot the M4 core. This callback is used only in IPC-only
- * mode.
- */
-static int k3_m4_rproc_attach(struct rproc *rproc)
-{
- struct k3_m4_rproc *kproc = rproc->priv;
- int ret;
-
- ret = k3_m4_rproc_ping_mbox(kproc);
- if (ret)
- return ret;
-
- return 0;
-}
-
-/*
- * Detach from a running M4 remote processor (IPC-only mode)
- *
- * This rproc detach callback performs the opposite operation to attach
- * callback, the M4 core is not stopped and will be left to continue to
- * run its booted firmware. This callback is invoked only in IPC-only mode.
- */
-static int k3_m4_rproc_detach(struct rproc *rproc)
-{
- return 0;
-}
+#include "ti_k3_common.h"
static const struct rproc_ops k3_m4_rproc_ops = {
- .prepare = k3_m4_rproc_prepare,
- .unprepare = k3_m4_rproc_unprepare,
- .start = k3_m4_rproc_start,
- .stop = k3_m4_rproc_stop,
- .attach = k3_m4_rproc_attach,
- .detach = k3_m4_rproc_detach,
- .kick = k3_m4_rproc_kick,
- .da_to_va = k3_m4_rproc_da_to_va,
- .get_loaded_rsc_table = k3_m4_get_loaded_rsc_table,
+ .prepare = k3_rproc_prepare,
+ .unprepare = k3_rproc_unprepare,
+ .start = k3_rproc_start,
+ .stop = k3_rproc_stop,
+ .attach = k3_rproc_attach,
+ .detach = k3_rproc_detach,
+ .kick = k3_rproc_kick,
+ .da_to_va = k3_rproc_da_to_va,
+ .get_loaded_rsc_table = k3_get_loaded_rsc_table,
};
static int k3_m4_rproc_probe(struct platform_device *pdev)
{
+ const struct k3_rproc_dev_data *data;
struct device *dev = &pdev->dev;
- struct k3_m4_rproc *kproc;
+ struct k3_rproc *kproc;
struct rproc *rproc;
const char *fw_name;
bool r_state = false;
bool p_state = false;
int ret;
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -ENODEV;
+
ret = rproc_of_parse_firmware(dev, 0, &fw_name);
if (ret)
return dev_err_probe(dev, ret, "failed to parse firmware-name property\n");
@@ -578,6 +61,8 @@ static int k3_m4_rproc_probe(struct platform_device *pdev)
rproc->recovery_disabled = true;
kproc = rproc->priv;
kproc->dev = dev;
+ kproc->rproc = rproc;
+ kproc->data = data;
platform_set_drvdata(pdev, rproc);
kproc->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
@@ -601,15 +86,15 @@ static int k3_m4_rproc_probe(struct platform_device *pdev)
ret = ti_sci_proc_request(kproc->tsp);
if (ret < 0)
return dev_err_probe(dev, ret, "ti_sci_proc_request failed\n");
- ret = devm_add_action_or_reset(dev, k3_m4_release_tsp, kproc->tsp);
+ ret = devm_add_action_or_reset(dev, k3_release_tsp, kproc->tsp);
if (ret)
return ret;
- ret = k3_m4_rproc_of_get_memories(pdev, kproc);
+ ret = k3_rproc_of_get_memories(pdev, kproc);
if (ret)
return ret;
- ret = k3_m4_reserved_mem_init(kproc);
+ ret = k3_reserved_mem_init(kproc);
if (ret)
return dev_err_probe(dev, ret, "reserved memory init failed\n");
@@ -627,15 +112,9 @@ static int k3_m4_rproc_probe(struct platform_device *pdev)
dev_info(dev, "configured M4F for remoteproc mode\n");
}
- kproc->client.dev = dev;
- kproc->client.tx_done = NULL;
- kproc->client.rx_callback = k3_m4_rproc_mbox_callback;
- kproc->client.tx_block = false;
- kproc->client.knows_txdone = false;
- kproc->mbox = mbox_request_channel(&kproc->client, 0);
- if (IS_ERR(kproc->mbox))
- return dev_err_probe(dev, PTR_ERR(kproc->mbox),
- "mbox_request_channel failed\n");
+ ret = k3_rproc_request_mbox(rproc);
+ if (ret)
+ return ret;
ret = devm_rproc_add(dev, rproc);
if (ret)
@@ -645,8 +124,20 @@ static int k3_m4_rproc_probe(struct platform_device *pdev)
return 0;
}
+static const struct k3_rproc_mem_data am64_m4_mems[] = {
+ { .name = "iram", .dev_addr = 0x0 },
+ { .name = "dram", .dev_addr = 0x30000 },
+};
+
+static const struct k3_rproc_dev_data am64_m4_data = {
+ .mems = am64_m4_mems,
+ .num_mems = ARRAY_SIZE(am64_m4_mems),
+ .boot_align_addr = SZ_1K,
+ .uses_lreset = true,
+};
+
static const struct of_device_id k3_m4_of_match[] = {
- { .compatible = "ti,am64-m4fss", },
+ { .compatible = "ti,am64-m4fss", .data = &am64_m4_data, },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, k3_m4_of_match);
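The M4 driver above is reduced to match data plus probe glue: everything SoC-specific now arrives through the of_device_id .data pointer. A minimal sketch of that lookup pattern, assuming a hypothetical example_probe() body around the of_device_get_match_data() call used in this patch:

	static int example_probe(struct platform_device *pdev)
	{
		const struct k3_rproc_dev_data *data;

		/* returns the .data field of the matched of_device_id entry */
		data = of_device_get_match_data(&pdev->dev);
		if (!data)
			return -ENODEV;

		/*
		 * data->mems[] now names the "iram"/"dram" regions and their
		 * device addresses, so the shared k3_rproc_of_get_memories()
		 * helper can map them without a driver-local table.
		 */
		return 0;
	}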
diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
index dbc513c5569c..e34c04c135fc 100644
--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
@@ -26,6 +26,7 @@
#include "omap_remoteproc.h"
#include "remoteproc_internal.h"
#include "ti_sci_proc.h"
+#include "ti_k3_common.h"
/* This address can either be for ATCM or BTCM with the other at address 0x0 */
#define K3_R5_TCM_DEV_ADDR 0x41010000
@@ -55,20 +56,6 @@
/* Applicable to only AM64x SoCs */
#define PROC_BOOT_STATUS_FLAG_R5_SINGLECORE_ONLY 0x00000200
-/**
- * struct k3_r5_mem - internal memory structure
- * @cpu_addr: MPU virtual address of the memory region
- * @bus_addr: Bus address used to access the memory region
- * @dev_addr: Device address from remoteproc view
- * @size: Size of the memory region
- */
-struct k3_r5_mem {
- void __iomem *cpu_addr;
- phys_addr_t bus_addr;
- u32 dev_addr;
- size_t size;
-};
-
/*
* All cluster mode values are not applicable on all SoCs. The following
* are the modes supported on various SoCs:
@@ -90,12 +77,14 @@ enum cluster_mode {
* @tcm_ecc_autoinit: flag to denote the auto-initialization of TCMs for ECC
* @single_cpu_mode: flag to denote if SoC/IP supports Single-CPU mode
* @is_single_core: flag to denote if SoC/IP has only single core R5
+ * @core_data: pointer to R5-core-specific device data
*/
struct k3_r5_soc_data {
bool tcm_is_double;
bool tcm_ecc_autoinit;
bool single_cpu_mode;
bool is_single_core;
+ const struct k3_rproc_dev_data *core_data;
};
/**
@@ -118,15 +107,10 @@ struct k3_r5_cluster {
* struct k3_r5_core - K3 R5 core structure
* @elem: linked list item
* @dev: cached device pointer
- * @rproc: rproc handle representing this core
- * @mem: internal memory regions data
+ * @kproc: K3 rproc handle representing this core
+ * @cluster: cached pointer to parent cluster structure
* @sram: on-chip SRAM memory regions data
- * @num_mems: number of internal memory regions
* @num_sram: number of on-chip SRAM memory regions
- * @reset: reset control handle
- * @tsp: TI-SCI processor control handle
- * @ti_sci: TI-SCI handle
- * @ti_sci_id: TI-SCI device identifier
* @atcm_enable: flag to control ATCM enablement
* @btcm_enable: flag to control BTCM enablement
* @loczrama: flag to dictate which TCM is at device address 0x0
@@ -135,157 +119,58 @@ struct k3_r5_cluster {
struct k3_r5_core {
struct list_head elem;
struct device *dev;
- struct rproc *rproc;
- struct k3_r5_mem *mem;
- struct k3_r5_mem *sram;
- int num_mems;
+ struct k3_rproc *kproc;
+ struct k3_r5_cluster *cluster;
+ struct k3_rproc_mem *sram;
int num_sram;
- struct reset_control *reset;
- struct ti_sci_proc *tsp;
- const struct ti_sci_handle *ti_sci;
- u32 ti_sci_id;
u32 atcm_enable;
u32 btcm_enable;
u32 loczrama;
bool released_from_reset;
};
-/**
- * struct k3_r5_rproc - K3 remote processor state
- * @dev: cached device pointer
- * @cluster: cached pointer to parent cluster structure
- * @mbox: mailbox channel handle
- * @client: mailbox client to request the mailbox channel
- * @rproc: rproc handle
- * @core: cached pointer to r5 core structure being used
- * @rmem: reserved memory regions data
- * @num_rmems: number of reserved memory regions
- */
-struct k3_r5_rproc {
- struct device *dev;
- struct k3_r5_cluster *cluster;
- struct mbox_chan *mbox;
- struct mbox_client client;
- struct rproc *rproc;
- struct k3_r5_core *core;
- struct k3_r5_mem *rmem;
- int num_rmems;
-};
-
-/**
- * k3_r5_rproc_mbox_callback() - inbound mailbox message handler
- * @client: mailbox client pointer used for requesting the mailbox channel
- * @data: mailbox payload
- *
- * This handler is invoked by the OMAP mailbox driver whenever a mailbox
- * message is received. Usually, the mailbox payload simply contains
- * the index of the virtqueue that is kicked by the remote processor,
- * and we let remoteproc core handle it.
- *
- * In addition to virtqueue indices, we also have some out-of-band values
- * that indicate different events. Those values are deliberately very
- * large so they don't coincide with virtqueue indices.
- */
-static void k3_r5_rproc_mbox_callback(struct mbox_client *client, void *data)
-{
- struct k3_r5_rproc *kproc = container_of(client, struct k3_r5_rproc,
- client);
- struct device *dev = kproc->rproc->dev.parent;
- const char *name = kproc->rproc->name;
- u32 msg = omap_mbox_message(data);
-
- /* Do not forward message from a detached core */
- if (kproc->rproc->state == RPROC_DETACHED)
- return;
-
- dev_dbg(dev, "mbox msg: 0x%x\n", msg);
-
- switch (msg) {
- case RP_MBOX_CRASH:
- /*
- * remoteproc detected an exception, but error recovery is not
- * supported. So, just log this for now
- */
- dev_err(dev, "K3 R5F rproc %s crashed\n", name);
- break;
- case RP_MBOX_ECHO_REPLY:
- dev_info(dev, "received echo reply from %s\n", name);
- break;
- default:
- /* silently handle all other valid messages */
- if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
- return;
- if (msg > kproc->rproc->max_notifyid) {
- dev_dbg(dev, "dropping unknown message 0x%x", msg);
- return;
- }
- /* msg contains the index of the triggered vring */
- if (rproc_vq_interrupt(kproc->rproc, msg) == IRQ_NONE)
- dev_dbg(dev, "no message was found in vqid %d\n", msg);
- }
-}
-
-/* kick a virtqueue */
-static void k3_r5_rproc_kick(struct rproc *rproc, int vqid)
-{
- struct k3_r5_rproc *kproc = rproc->priv;
- struct device *dev = rproc->dev.parent;
- mbox_msg_t msg = (mbox_msg_t)vqid;
- int ret;
-
- /* Do not forward message to a detached core */
- if (kproc->rproc->state == RPROC_DETACHED)
- return;
-
- /* send the index of the triggered virtqueue in the mailbox payload */
- ret = mbox_send_message(kproc->mbox, (void *)msg);
- if (ret < 0)
- dev_err(dev, "failed to send mailbox message, status = %d\n",
- ret);
-}
-
-static int k3_r5_split_reset(struct k3_r5_core *core)
+static int k3_r5_split_reset(struct k3_rproc *kproc)
{
int ret;
- ret = reset_control_assert(core->reset);
+ ret = reset_control_assert(kproc->reset);
if (ret) {
- dev_err(core->dev, "local-reset assert failed, ret = %d\n",
+ dev_err(kproc->dev, "local-reset assert failed, ret = %d\n",
ret);
return ret;
}
- ret = core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
- core->ti_sci_id);
+ ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id);
if (ret) {
- dev_err(core->dev, "module-reset assert failed, ret = %d\n",
+ dev_err(kproc->dev, "module-reset assert failed, ret = %d\n",
ret);
- if (reset_control_deassert(core->reset))
- dev_warn(core->dev, "local-reset deassert back failed\n");
+ if (reset_control_deassert(kproc->reset))
+ dev_warn(kproc->dev, "local-reset deassert back failed\n");
}
return ret;
}
-static int k3_r5_split_release(struct k3_r5_core *core)
+static int k3_r5_split_release(struct k3_rproc *kproc)
{
int ret;
- ret = core->ti_sci->ops.dev_ops.get_device(core->ti_sci,
- core->ti_sci_id);
+ ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
+ kproc->ti_sci_id);
if (ret) {
- dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
+ dev_err(kproc->dev, "module-reset deassert failed, ret = %d\n",
ret);
return ret;
}
- ret = reset_control_deassert(core->reset);
+ ret = reset_control_deassert(kproc->reset);
if (ret) {
- dev_err(core->dev, "local-reset deassert failed, ret = %d\n",
+ dev_err(kproc->dev, "local-reset deassert failed, ret = %d\n",
ret);
- if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
- core->ti_sci_id))
- dev_warn(core->dev, "module-reset assert back failed\n");
+ if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id))
+ dev_warn(kproc->dev, "module-reset assert back failed\n");
}
return ret;
@@ -294,11 +179,12 @@ static int k3_r5_split_release(struct k3_r5_core *core)
static int k3_r5_lockstep_reset(struct k3_r5_cluster *cluster)
{
struct k3_r5_core *core;
+ struct k3_rproc *kproc;
int ret;
/* assert local reset on all applicable cores */
list_for_each_entry(core, &cluster->cores, elem) {
- ret = reset_control_assert(core->reset);
+ ret = reset_control_assert(core->kproc->reset);
if (ret) {
dev_err(core->dev, "local-reset assert failed, ret = %d\n",
ret);
@@ -309,8 +195,9 @@ static int k3_r5_lockstep_reset(struct k3_r5_cluster *cluster)
/* disable PSC modules on all applicable cores */
list_for_each_entry(core, &cluster->cores, elem) {
- ret = core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
- core->ti_sci_id);
+ kproc = core->kproc;
+ ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id);
if (ret) {
dev_err(core->dev, "module-reset assert failed, ret = %d\n",
ret);
@@ -322,14 +209,15 @@ static int k3_r5_lockstep_reset(struct k3_r5_cluster *cluster)
unroll_module_reset:
list_for_each_entry_continue_reverse(core, &cluster->cores, elem) {
- if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
- core->ti_sci_id))
+ kproc = core->kproc;
+ if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id))
dev_warn(core->dev, "module-reset assert back failed\n");
}
core = list_last_entry(&cluster->cores, struct k3_r5_core, elem);
unroll_local_reset:
list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
- if (reset_control_deassert(core->reset))
+ if (reset_control_deassert(core->kproc->reset))
dev_warn(core->dev, "local-reset deassert back failed\n");
}
@@ -339,12 +227,14 @@ unroll_local_reset:
static int k3_r5_lockstep_release(struct k3_r5_cluster *cluster)
{
struct k3_r5_core *core;
+ struct k3_rproc *kproc;
int ret;
/* enable PSC modules on all applicable cores */
list_for_each_entry_reverse(core, &cluster->cores, elem) {
- ret = core->ti_sci->ops.dev_ops.get_device(core->ti_sci,
- core->ti_sci_id);
+ kproc = core->kproc;
+ ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
+ kproc->ti_sci_id);
if (ret) {
dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
ret);
@@ -355,7 +245,7 @@ static int k3_r5_lockstep_release(struct k3_r5_cluster *cluster)
/* deassert local reset on all applicable cores */
list_for_each_entry_reverse(core, &cluster->cores, elem) {
- ret = reset_control_deassert(core->reset);
+ ret = reset_control_deassert(core->kproc->reset);
if (ret) {
dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
ret);
@@ -367,67 +257,33 @@ static int k3_r5_lockstep_release(struct k3_r5_cluster *cluster)
unroll_local_reset:
list_for_each_entry_continue(core, &cluster->cores, elem) {
- if (reset_control_assert(core->reset))
+ if (reset_control_assert(core->kproc->reset))
dev_warn(core->dev, "local-reset assert back failed\n");
}
core = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
unroll_module_reset:
list_for_each_entry_from(core, &cluster->cores, elem) {
- if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
- core->ti_sci_id))
+ kproc = core->kproc;
+ if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id))
dev_warn(core->dev, "module-reset assert back failed\n");
}
return ret;
}
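Both lockstep helpers rely on the same unroll-on-failure idiom: transition the cores one by one and, on a mid-list failure, walk back over the entries already processed and undo them with list_for_each_entry_continue_reverse(). A stripped-down sketch of the shape, with hypothetical do_step()/undo_step() helpers standing in for the reset and TI-SCI calls:

	list_for_each_entry(core, &cluster->cores, elem) {
		ret = do_step(core);			/* hypothetical */
		if (ret)
			goto unroll;
	}
	return 0;

	unroll:
	/* undo the cores already transitioned, newest first */
	list_for_each_entry_continue_reverse(core, &cluster->cores, elem) {
		if (undo_step(core))			/* hypothetical */
			dev_warn(core->dev, "undo failed\n");
	}
	return ret;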
-static inline int k3_r5_core_halt(struct k3_r5_core *core)
+static inline int k3_r5_core_halt(struct k3_rproc *kproc)
{
- return ti_sci_proc_set_control(core->tsp,
+ return ti_sci_proc_set_control(kproc->tsp,
PROC_BOOT_CTRL_FLAG_R5_CORE_HALT, 0);
}
-static inline int k3_r5_core_run(struct k3_r5_core *core)
+static inline int k3_r5_core_run(struct k3_rproc *kproc)
{
- return ti_sci_proc_set_control(core->tsp,
+ return ti_sci_proc_set_control(kproc->tsp,
0, PROC_BOOT_CTRL_FLAG_R5_CORE_HALT);
}
-static int k3_r5_rproc_request_mbox(struct rproc *rproc)
-{
- struct k3_r5_rproc *kproc = rproc->priv;
- struct mbox_client *client = &kproc->client;
- struct device *dev = kproc->dev;
- int ret;
-
- client->dev = dev;
- client->tx_done = NULL;
- client->rx_callback = k3_r5_rproc_mbox_callback;
- client->tx_block = false;
- client->knows_txdone = false;
-
- kproc->mbox = mbox_request_channel(client, 0);
- if (IS_ERR(kproc->mbox))
- return dev_err_probe(dev, PTR_ERR(kproc->mbox),
- "mbox_request_channel failed\n");
-
- /*
- * Ping the remote processor, this is only for sanity-sake for now;
- * there is no functional effect whatsoever.
- *
- * Note that the reply will _not_ arrive immediately: this message
- * will wait in the mailbox fifo until the remote processor is booted.
- */
- ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
- if (ret < 0) {
- dev_err(dev, "mbox_send_message failed: %d\n", ret);
- mbox_free_channel(kproc->mbox);
- return ret;
- }
-
- return 0;
-}
-
/*
* The R5F cores have controls for both a reset and a halt/run. The code
* execution from DDR requires the initial boot-strapping code to be run
@@ -446,16 +302,39 @@ static int k3_r5_rproc_request_mbox(struct rproc *rproc)
*/
static int k3_r5_rproc_prepare(struct rproc *rproc)
{
- struct k3_r5_rproc *kproc = rproc->priv;
- struct k3_r5_cluster *cluster = kproc->cluster;
- struct k3_r5_core *core = kproc->core;
+ struct k3_rproc *kproc = rproc->priv;
+ struct k3_r5_core *core = kproc->priv, *core0, *core1;
+ struct k3_r5_cluster *cluster = core->cluster;
struct device *dev = kproc->dev;
u32 ctrl = 0, cfg = 0, stat = 0;
u64 boot_vec = 0;
bool mem_init_dis;
int ret;
- ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl, &stat);
+ /*
+ * R5 cores must be powered on sequentially; core0 should be in a
+ * higher power state than core1 in a cluster. So, wait for core0 to
+ * power up before proceeding to core1, with a 2 second timeout. This
+ * waiting mechanism is necessary because rproc_auto_boot_callback() for
+ * core1 can be called before core0 due to thread execution order.
+ *
+ * By placing the wait mechanism here in .prepare() ops, this condition
+ * is enforced for rproc boot requests from sysfs as well.
+ */
+ core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
+ core1 = list_last_entry(&cluster->cores, struct k3_r5_core, elem);
+ if (cluster->mode == CLUSTER_MODE_SPLIT && core == core1 &&
+ !core0->released_from_reset) {
+ ret = wait_event_interruptible_timeout(cluster->core_transition,
+ core0->released_from_reset,
+ msecs_to_jiffies(2000));
+ if (ret <= 0) {
+ dev_err(dev, "can not power up core1 before core0");
+ return -EPERM;
+ }
+ }
+
+ ret = ti_sci_proc_get_status(kproc->tsp, &boot_vec, &cfg, &ctrl, &stat);
if (ret < 0)
return ret;
mem_init_dis = !!(cfg & PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS);
@@ -463,7 +342,7 @@ static int k3_r5_rproc_prepare(struct rproc *rproc)
/* Re-use LockStep-mode reset logic for Single-CPU mode */
ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
cluster->mode == CLUSTER_MODE_SINGLECPU) ?
- k3_r5_lockstep_release(cluster) : k3_r5_split_release(core);
+ k3_r5_lockstep_release(cluster) : k3_r5_split_release(kproc);
if (ret) {
dev_err(dev, "unable to enable cores for TCM loading, ret = %d\n",
ret);
@@ -471,6 +350,14 @@ static int k3_r5_rproc_prepare(struct rproc *rproc)
}
/*
+ * Notify all threads in the wait queue when core0 state has changed so
+ * that any thread waiting on this condition can proceed.
+ */
+ core->released_from_reset = true;
+ if (core == core0)
+ wake_up_interruptible(&cluster->core_transition);
+
+ /*
* Newer IP revisions like on J7200 SoCs support h/w auto-initialization
* of TCMs, so there is no need to perform the s/w memzero. This bit is
* configurable through System Firmware, the default value does perform
@@ -487,10 +374,10 @@ static int k3_r5_rproc_prepare(struct rproc *rproc)
* can be effective on all TCM addresses.
*/
dev_dbg(dev, "zeroing out ATCM memory\n");
- memset_io(core->mem[0].cpu_addr, 0x00, core->mem[0].size);
+ memset_io(kproc->mem[0].cpu_addr, 0x00, kproc->mem[0].size);
dev_dbg(dev, "zeroing out BTCM memory\n");
- memset_io(core->mem[1].cpu_addr, 0x00, core->mem[1].size);
+ memset_io(kproc->mem[1].cpu_addr, 0x00, kproc->mem[1].size);
return 0;
}
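The core0-before-core1 ordering enforced above is a standard wait-queue handshake: the late core sleeps on a condition flag that the early core publishes before waking any waiters. A condensed sketch of the two halves, using the names from this patch:

	/* waiter side (core1 .prepare): block until core0 leaves reset */
	ret = wait_event_interruptible_timeout(cluster->core_transition,
					       core0->released_from_reset,
					       msecs_to_jiffies(2000));
	if (ret <= 0)	/* 0 means timeout, negative means interrupted */
		return -EPERM;

	/* waker side (core0 .prepare): set the flag, then wake waiters */
	core0->released_from_reset = true;
	wake_up_interruptible(&cluster->core_transition);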
@@ -513,19 +400,47 @@ static int k3_r5_rproc_prepare(struct rproc *rproc)
*/
static int k3_r5_rproc_unprepare(struct rproc *rproc)
{
- struct k3_r5_rproc *kproc = rproc->priv;
- struct k3_r5_cluster *cluster = kproc->cluster;
- struct k3_r5_core *core = kproc->core;
+ struct k3_rproc *kproc = rproc->priv;
+ struct k3_r5_core *core = kproc->priv, *core0, *core1;
+ struct k3_r5_cluster *cluster = core->cluster;
struct device *dev = kproc->dev;
int ret;
+ /*
+ * Ensure power-down of cores is sequential in split mode. Core1 must
+ * power down before Core0 to maintain the expected state. By placing
+ * the wait mechanism here in .unprepare() ops, this condition is
+ * enforced for rproc stop or shutdown requests from sysfs and device
+ * removal as well.
+ */
+ core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
+ core1 = list_last_entry(&cluster->cores, struct k3_r5_core, elem);
+ if (cluster->mode == CLUSTER_MODE_SPLIT && core == core0 &&
+ core1->released_from_reset) {
+ ret = wait_event_interruptible_timeout(cluster->core_transition,
+ !core1->released_from_reset,
+ msecs_to_jiffies(2000));
+ if (ret <= 0) {
+ dev_err(dev, "can not power down core0 before core1");
+ return -EPERM;
+ }
+ }
+
/* Re-use LockStep-mode reset logic for Single-CPU mode */
ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
cluster->mode == CLUSTER_MODE_SINGLECPU) ?
- k3_r5_lockstep_reset(cluster) : k3_r5_split_reset(core);
+ k3_r5_lockstep_reset(cluster) : k3_r5_split_reset(kproc);
if (ret)
dev_err(dev, "unable to disable cores, ret = %d\n", ret);
+ /*
+ * Notify all threads in the wait queue when core1 state has changed so
+ * that any thread waiting on this condition can proceed.
+ */
+ core->released_from_reset = false;
+ if (core == core1)
+ wake_up_interruptible(&cluster->core_transition);
+
return ret;
}
@@ -548,10 +463,10 @@ static int k3_r5_rproc_unprepare(struct rproc *rproc)
*/
static int k3_r5_rproc_start(struct rproc *rproc)
{
- struct k3_r5_rproc *kproc = rproc->priv;
- struct k3_r5_cluster *cluster = kproc->cluster;
+ struct k3_rproc *kproc = rproc->priv;
+ struct k3_r5_core *core = kproc->priv;
+ struct k3_r5_cluster *cluster = core->cluster;
struct device *dev = kproc->dev;
- struct k3_r5_core *core0, *core;
u32 boot_addr;
int ret;
@@ -560,41 +475,28 @@ static int k3_r5_rproc_start(struct rproc *rproc)
dev_dbg(dev, "booting R5F core using boot addr = 0x%x\n", boot_addr);
/* boot vector need not be programmed for Core1 in LockStep mode */
- core = kproc->core;
- ret = ti_sci_proc_set_config(core->tsp, boot_addr, 0, 0);
+ ret = ti_sci_proc_set_config(kproc->tsp, boot_addr, 0, 0);
if (ret)
return ret;
/* unhalt/run all applicable cores */
if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
list_for_each_entry_reverse(core, &cluster->cores, elem) {
- ret = k3_r5_core_run(core);
+ ret = k3_r5_core_run(core->kproc);
if (ret)
goto unroll_core_run;
}
} else {
- /* do not allow core 1 to start before core 0 */
- core0 = list_first_entry(&cluster->cores, struct k3_r5_core,
- elem);
- if (core != core0 && core0->rproc->state == RPROC_OFFLINE) {
- dev_err(dev, "%s: can not start core 1 before core 0\n",
- __func__);
- return -EPERM;
- }
-
- ret = k3_r5_core_run(core);
+ ret = k3_r5_core_run(core->kproc);
if (ret)
return ret;
-
- core->released_from_reset = true;
- wake_up_interruptible(&cluster->core_transition);
}
return 0;
unroll_core_run:
list_for_each_entry_continue(core, &cluster->cores, elem) {
- if (k3_r5_core_halt(core))
+ if (k3_r5_core_halt(core->kproc))
dev_warn(core->dev, "core halt back failed\n");
}
return ret;
@@ -626,33 +528,22 @@ unroll_core_run:
*/
static int k3_r5_rproc_stop(struct rproc *rproc)
{
- struct k3_r5_rproc *kproc = rproc->priv;
- struct k3_r5_cluster *cluster = kproc->cluster;
- struct device *dev = kproc->dev;
- struct k3_r5_core *core1, *core = kproc->core;
+ struct k3_rproc *kproc = rproc->priv;
+ struct k3_r5_core *core = kproc->priv;
+ struct k3_r5_cluster *cluster = core->cluster;
int ret;
/* halt all applicable cores */
if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
list_for_each_entry(core, &cluster->cores, elem) {
- ret = k3_r5_core_halt(core);
+ ret = k3_r5_core_halt(core->kproc);
if (ret) {
core = list_prev_entry(core, elem);
goto unroll_core_halt;
}
}
} else {
- /* do not allow core 0 to stop before core 1 */
- core1 = list_last_entry(&cluster->cores, struct k3_r5_core,
- elem);
- if (core != core1 && core1->rproc->state != RPROC_OFFLINE) {
- dev_err(dev, "%s: can not stop core 0 before core 1\n",
- __func__);
- ret = -EPERM;
- goto out;
- }
-
- ret = k3_r5_core_halt(core);
+ ret = k3_r5_core_halt(core->kproc);
if (ret)
goto out;
}
@@ -661,7 +552,7 @@ static int k3_r5_rproc_stop(struct rproc *rproc)
unroll_core_halt:
list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
- if (k3_r5_core_run(core))
+ if (k3_r5_core_run(core->kproc))
dev_warn(core->dev, "core run back failed\n");
}
out:
@@ -669,58 +560,6 @@ out:
}
/*
- * Attach to a running R5F remote processor (IPC-only mode)
- *
- * The R5F attach callback is a NOP. The remote processor is already booted, and
- * all required resources have been acquired during probe routine, so there is
- * no need to issue any TI-SCI commands to boot the R5F cores in IPC-only mode.
- * This callback is invoked only in IPC-only mode and exists because
- * rproc_validate() checks for its existence.
- */
-static int k3_r5_rproc_attach(struct rproc *rproc) { return 0; }
-
-/*
- * Detach from a running R5F remote processor (IPC-only mode)
- *
- * The R5F detach callback is a NOP. The R5F cores are not stopped and will be
- * left in booted state in IPC-only mode. This callback is invoked only in
- * IPC-only mode and exists for sanity sake.
- */
-static int k3_r5_rproc_detach(struct rproc *rproc) { return 0; }
-
-/*
- * This function implements the .get_loaded_rsc_table() callback and is used
- * to provide the resource table for the booted R5F in IPC-only mode. The K3 R5F
- * firmwares follow a design-by-contract approach and are expected to have the
- * resource table at the base of the DDR region reserved for firmware usage.
- * This provides flexibility for the remote processor to be booted by different
- * bootloaders that may or may not have the ability to publish the resource table
- * address and size through a DT property. This callback is invoked only in
- * IPC-only mode.
- */
-static struct resource_table *k3_r5_get_loaded_rsc_table(struct rproc *rproc,
- size_t *rsc_table_sz)
-{
- struct k3_r5_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
-
- if (!kproc->rmem[0].cpu_addr) {
- dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found");
- return ERR_PTR(-ENOMEM);
- }
-
- /*
- * NOTE: The resource table size is currently hard-coded to a maximum
- * of 256 bytes. The most common resource table usage for K3 firmwares
- * is to only have the vdev resource entry and an optional trace entry.
- * The exact size could be computed based on resource table address, but
- * the hard-coded value suffices to support the IPC-only mode.
- */
- *rsc_table_sz = 256;
- return (__force struct resource_table *)kproc->rmem[0].cpu_addr;
-}
-
-/*
* Internal Memory translation helper
*
* Custom function implementing the rproc .da_to_va ops to provide address
@@ -730,10 +569,9 @@ static struct resource_table *k3_r5_get_loaded_rsc_table(struct rproc *rproc,
*/
static void *k3_r5_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
- struct k3_r5_rproc *kproc = rproc->priv;
- struct k3_r5_core *core = kproc->core;
+ struct k3_rproc *kproc = rproc->priv;
+ struct k3_r5_core *core = kproc->priv;
void __iomem *va = NULL;
- phys_addr_t bus_addr;
u32 dev_addr, offset;
size_t size;
int i;
@@ -741,27 +579,6 @@ static void *k3_r5_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool
if (len == 0)
return NULL;
- /* handle both R5 and SoC views of ATCM and BTCM */
- for (i = 0; i < core->num_mems; i++) {
- bus_addr = core->mem[i].bus_addr;
- dev_addr = core->mem[i].dev_addr;
- size = core->mem[i].size;
-
- /* handle R5-view addresses of TCMs */
- if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
- offset = da - dev_addr;
- va = core->mem[i].cpu_addr + offset;
- return (__force void *)va;
- }
-
- /* handle SoC-view addresses of TCMs */
- if (da >= bus_addr && ((da + len) <= (bus_addr + size))) {
- offset = da - bus_addr;
- va = core->mem[i].cpu_addr + offset;
- return (__force void *)va;
- }
- }
-
/* handle any SRAM regions using SoC-view addresses */
for (i = 0; i < core->num_sram; i++) {
dev_addr = core->sram[i].dev_addr;
@@ -774,19 +591,8 @@ static void *k3_r5_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool
}
}
- /* handle static DDR reserved memory regions */
- for (i = 0; i < kproc->num_rmems; i++) {
- dev_addr = kproc->rmem[i].dev_addr;
- size = kproc->rmem[i].size;
-
- if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
- offset = da - dev_addr;
- va = kproc->rmem[i].cpu_addr + offset;
- return (__force void *)va;
- }
- }
-
- return NULL;
+ /* handle both TCM and DDR memory regions */
+ return k3_rproc_da_to_va(rproc, da, len, is_iomem);
}
static const struct rproc_ops k3_r5_rproc_ops = {
@@ -794,7 +600,7 @@ static const struct rproc_ops k3_r5_rproc_ops = {
.unprepare = k3_r5_rproc_unprepare,
.start = k3_r5_rproc_start,
.stop = k3_r5_rproc_stop,
- .kick = k3_r5_rproc_kick,
+ .kick = k3_rproc_kick,
.da_to_va = k3_r5_rproc_da_to_va,
};
@@ -833,11 +639,11 @@ static const struct rproc_ops k3_r5_rproc_ops = {
* both the cores with the same settings, before reconfiguring again for
* LockStep mode.
*/
-static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc)
+static int k3_r5_rproc_configure(struct k3_rproc *kproc)
{
- struct k3_r5_cluster *cluster = kproc->cluster;
+ struct k3_r5_core *temp, *core0, *core = kproc->priv;
+ struct k3_r5_cluster *cluster = core->cluster;
struct device *dev = kproc->dev;
- struct k3_r5_core *core0, *core, *temp;
u32 ctrl = 0, cfg = 0, stat = 0;
u32 set_cfg = 0, clr_cfg = 0;
u64 boot_vec = 0;
@@ -851,10 +657,10 @@ static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc)
cluster->mode == CLUSTER_MODE_SINGLECORE) {
core = core0;
} else {
- core = kproc->core;
+ core = kproc->priv;
}
- ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl,
+ ret = ti_sci_proc_get_status(core->kproc->tsp, &boot_vec, &cfg, &ctrl,
&stat);
if (ret < 0)
return ret;
@@ -924,7 +730,7 @@ static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc)
* and TEINIT config is only allowed with Core0.
*/
list_for_each_entry(temp, &cluster->cores, elem) {
- ret = k3_r5_core_halt(temp);
+ ret = k3_r5_core_halt(temp->kproc);
if (ret)
goto out;
@@ -932,7 +738,7 @@ static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc)
clr_cfg &= ~PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
clr_cfg &= ~PROC_BOOT_CFG_FLAG_R5_TEINIT;
}
- ret = ti_sci_proc_set_config(temp->tsp, boot_vec,
+ ret = ti_sci_proc_set_config(temp->kproc->tsp, boot_vec,
set_cfg, clr_cfg);
if (ret)
goto out;
@@ -940,14 +746,14 @@ static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc)
set_cfg = PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
clr_cfg = 0;
- ret = ti_sci_proc_set_config(core->tsp, boot_vec,
+ ret = ti_sci_proc_set_config(core->kproc->tsp, boot_vec,
set_cfg, clr_cfg);
} else {
- ret = k3_r5_core_halt(core);
+ ret = k3_r5_core_halt(core->kproc);
if (ret)
goto out;
- ret = ti_sci_proc_set_config(core->tsp, boot_vec,
+ ret = ti_sci_proc_set_config(core->kproc->tsp, boot_vec,
set_cfg, clr_cfg);
}
@@ -955,93 +761,6 @@ out:
return ret;
}
-static void k3_r5_mem_release(void *data)
-{
- struct device *dev = data;
-
- of_reserved_mem_device_release(dev);
-}
-
-static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc)
-{
- struct device *dev = kproc->dev;
- struct device_node *np = dev_of_node(dev);
- struct device_node *rmem_np;
- struct reserved_mem *rmem;
- int num_rmems;
- int ret, i;
-
- num_rmems = of_property_count_elems_of_size(np, "memory-region",
- sizeof(phandle));
- if (num_rmems <= 0) {
- dev_err(dev, "device does not have reserved memory regions, ret = %d\n",
- num_rmems);
- return -EINVAL;
- }
- if (num_rmems < 2) {
- dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
- num_rmems);
- return -EINVAL;
- }
-
- /* use reserved memory region 0 for vring DMA allocations */
- ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
- if (ret) {
- dev_err(dev, "device cannot initialize DMA pool, ret = %d\n",
- ret);
- return ret;
- }
-
- ret = devm_add_action_or_reset(dev, k3_r5_mem_release, dev);
- if (ret)
- return ret;
-
- num_rmems--;
- kproc->rmem = devm_kcalloc(dev, num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
- if (!kproc->rmem)
- return -ENOMEM;
-
- /* use remaining reserved memory regions for static carveouts */
- for (i = 0; i < num_rmems; i++) {
- rmem_np = of_parse_phandle(np, "memory-region", i + 1);
- if (!rmem_np)
- return -EINVAL;
-
- rmem = of_reserved_mem_lookup(rmem_np);
- of_node_put(rmem_np);
- if (!rmem)
- return -EINVAL;
-
- kproc->rmem[i].bus_addr = rmem->base;
- /*
- * R5Fs do not have an MMU, but have a Region Address Translator
- * (RAT) module that provides a fixed entry translation between
- * the 32-bit processor addresses to 64-bit bus addresses. The
- * RAT is programmable only by the R5F cores. Support for RAT
- * is currently not supported, so 64-bit address regions are not
- * supported. The absence of MMUs implies that the R5F device
- * addresses/supported memory regions are restricted to 32-bit
- * bus addresses, and are identical
- */
- kproc->rmem[i].dev_addr = (u32)rmem->base;
- kproc->rmem[i].size = rmem->size;
- kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
- if (!kproc->rmem[i].cpu_addr) {
- dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
- i + 1, &rmem->base, &rmem->size);
- return -ENOMEM;
- }
-
- dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
- i + 1, &kproc->rmem[i].bus_addr,
- kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
- kproc->rmem[i].dev_addr);
- }
- kproc->num_rmems = num_rmems;
-
- return 0;
-}
-
/*
* Each R5F core within a typical R5FSS instance has a total of 64 KB of TCMs,
* split equally into two 32 KB banks between ATCM and BTCM. The TCMs from both
@@ -1055,12 +774,11 @@ static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc)
* supported SoCs. The Core0 TCM sizes therefore have to be adjusted to only
* half the original size in Split mode.
*/
-static void k3_r5_adjust_tcm_sizes(struct k3_r5_rproc *kproc)
+static void k3_r5_adjust_tcm_sizes(struct k3_rproc *kproc)
{
- struct k3_r5_cluster *cluster = kproc->cluster;
- struct k3_r5_core *core = kproc->core;
+ struct k3_r5_core *core0, *core = kproc->priv;
+ struct k3_r5_cluster *cluster = core->cluster;
struct device *cdev = core->dev;
- struct k3_r5_core *core0;
if (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
cluster->mode == CLUSTER_MODE_SINGLECPU ||
@@ -1070,14 +788,14 @@ static void k3_r5_adjust_tcm_sizes(struct k3_r5_rproc *kproc)
core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
if (core == core0) {
- WARN_ON(core->mem[0].size != SZ_64K);
- WARN_ON(core->mem[1].size != SZ_64K);
+ WARN_ON(kproc->mem[0].size != SZ_64K);
+ WARN_ON(kproc->mem[1].size != SZ_64K);
- core->mem[0].size /= 2;
- core->mem[1].size /= 2;
+ kproc->mem[0].size /= 2;
+ kproc->mem[1].size /= 2;
dev_dbg(cdev, "adjusted TCM sizes, ATCM = 0x%zx BTCM = 0x%zx\n",
- core->mem[0].size, core->mem[1].size);
+ kproc->mem[0].size, kproc->mem[1].size);
}
}
@@ -1094,24 +812,23 @@ static void k3_r5_adjust_tcm_sizes(struct k3_r5_rproc *kproc)
* actual values configured by bootloader. The driver internal device memory
* addresses for TCMs are also updated.
*/
-static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
+static int k3_r5_rproc_configure_mode(struct k3_rproc *kproc)
{
- struct k3_r5_cluster *cluster = kproc->cluster;
- struct k3_r5_core *core = kproc->core;
+ struct k3_r5_core *core0, *core = kproc->priv;
+ struct k3_r5_cluster *cluster = core->cluster;
struct device *cdev = core->dev;
bool r_state = false, c_state = false, lockstep_en = false, single_cpu = false;
u32 ctrl = 0, cfg = 0, stat = 0, halted = 0;
u64 boot_vec = 0;
u32 atcm_enable, btcm_enable, loczrama;
- struct k3_r5_core *core0;
enum cluster_mode mode = cluster->mode;
int reset_ctrl_status;
int ret;
core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
- ret = core->ti_sci->ops.dev_ops.is_on(core->ti_sci, core->ti_sci_id,
- &r_state, &c_state);
+ ret = kproc->ti_sci->ops.dev_ops.is_on(kproc->ti_sci, kproc->ti_sci_id,
+ &r_state, &c_state);
if (ret) {
dev_err(cdev, "failed to get initial state, mode cannot be determined, ret = %d\n",
ret);
@@ -1122,7 +839,7 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
r_state, c_state);
}
- reset_ctrl_status = reset_control_status(core->reset);
+ reset_ctrl_status = reset_control_status(kproc->reset);
if (reset_ctrl_status < 0) {
dev_err(cdev, "failed to get initial local reset status, ret = %d\n",
reset_ctrl_status);
@@ -1135,7 +852,7 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
*/
core->released_from_reset = c_state;
- ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl,
+ ret = ti_sci_proc_get_status(kproc->tsp, &boot_vec, &cfg, &ctrl,
&stat);
if (ret < 0) {
dev_err(cdev, "failed to get initial processor status, ret = %d\n",
@@ -1170,10 +887,10 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
kproc->rproc->ops->unprepare = NULL;
kproc->rproc->ops->start = NULL;
kproc->rproc->ops->stop = NULL;
- kproc->rproc->ops->attach = k3_r5_rproc_attach;
- kproc->rproc->ops->detach = k3_r5_rproc_detach;
+ kproc->rproc->ops->attach = k3_rproc_attach;
+ kproc->rproc->ops->detach = k3_rproc_detach;
kproc->rproc->ops->get_loaded_rsc_table =
- k3_r5_get_loaded_rsc_table;
+ k3_get_loaded_rsc_table;
} else if (!c_state) {
dev_info(cdev, "configured R5F for remoteproc mode\n");
ret = 0;
@@ -1192,19 +909,121 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
core->atcm_enable = atcm_enable;
core->btcm_enable = btcm_enable;
core->loczrama = loczrama;
- core->mem[0].dev_addr = loczrama ? 0 : K3_R5_TCM_DEV_ADDR;
- core->mem[1].dev_addr = loczrama ? K3_R5_TCM_DEV_ADDR : 0;
+ kproc->mem[0].dev_addr = loczrama ? 0 : K3_R5_TCM_DEV_ADDR;
+ kproc->mem[1].dev_addr = loczrama ? K3_R5_TCM_DEV_ADDR : 0;
}
return ret;
}
+static int k3_r5_core_of_get_internal_memories(struct platform_device *pdev,
+ struct k3_rproc *kproc)
+{
+ const struct k3_rproc_dev_data *data = kproc->data;
+ struct device *dev = &pdev->dev;
+ struct k3_r5_core *core = kproc->priv;
+ int num_mems;
+ int i, ret;
+
+ num_mems = data->num_mems;
+ kproc->mem = devm_kcalloc(kproc->dev, num_mems, sizeof(*kproc->mem),
+ GFP_KERNEL);
+ if (!kproc->mem)
+ return -ENOMEM;
+
+ ret = k3_rproc_of_get_memories(pdev, kproc);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_mems; i++) {
+ /*
+ * TODO:
+ * The R5F cores can place ATCM & BTCM anywhere in their address
+ * space based on the corresponding Region Registers in the System
+ * Control coprocessor. For now, place ATCM and BTCM at
+ * addresses 0 and 0x41010000 (same as the bus address on AM65x
+ * SoCs) based on loczrama setting overriding default assignment
+ * done by k3_rproc_of_get_memories().
+ */
+ if (!strcmp(data->mems[i].name, "atcm")) {
+ kproc->mem[i].dev_addr = core->loczrama ?
+ 0 : K3_R5_TCM_DEV_ADDR;
+ } else {
+ kproc->mem[i].dev_addr = core->loczrama ?
+ K3_R5_TCM_DEV_ADDR : 0;
+ }
+
+ dev_dbg(dev, "Updating bus addr %pa of memory %5s\n",
+ &kproc->mem[i].bus_addr, data->mems[i].name);
+ }
+
+ return 0;
+}
+
+static int k3_r5_core_of_get_sram_memories(struct platform_device *pdev,
+ struct k3_r5_core *core)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct device_node *sram_np;
+ struct resource res;
+ int num_sram;
+ int i, ret;
+
+ num_sram = of_property_count_elems_of_size(np, "sram", sizeof(phandle));
+ if (num_sram <= 0) {
+ dev_dbg(dev, "device does not use reserved on-chip memories, num_sram = %d\n",
+ num_sram);
+ return 0;
+ }
+
+ core->sram = devm_kcalloc(dev, num_sram, sizeof(*core->sram), GFP_KERNEL);
+ if (!core->sram)
+ return -ENOMEM;
+
+ for (i = 0; i < num_sram; i++) {
+ sram_np = of_parse_phandle(np, "sram", i);
+ if (!sram_np)
+ return -EINVAL;
+
+ if (!of_device_is_available(sram_np)) {
+ of_node_put(sram_np);
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(sram_np, 0, &res);
+ of_node_put(sram_np);
+ if (ret)
+ return -EINVAL;
+
+ core->sram[i].bus_addr = res.start;
+ core->sram[i].dev_addr = res.start;
+ core->sram[i].size = resource_size(&res);
+ core->sram[i].cpu_addr = devm_ioremap_wc(dev, res.start,
+ resource_size(&res));
+ if (!core->sram[i].cpu_addr) {
+ dev_err(dev, "failed to parse and map sram%d memory at %pad\n",
+ i, &res.start);
+ return -ENOMEM;
+ }
+
+ dev_dbg(dev, "memory sram%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ i, &core->sram[i].bus_addr,
+ core->sram[i].size, core->sram[i].cpu_addr,
+ core->sram[i].dev_addr);
+ }
+ core->num_sram = num_sram;
+
+ return 0;
+}
+
static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
{
struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
- struct k3_r5_rproc *kproc;
+ struct k3_rproc *kproc;
struct k3_r5_core *core, *core1;
+ struct device_node *np;
struct device *cdev;
const char *fw_name;
struct rproc *rproc;
@@ -1213,6 +1032,7 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
core1 = list_last_entry(&cluster->cores, struct k3_r5_core, elem);
list_for_each_entry(core, &cluster->cores, elem) {
cdev = core->dev;
+ np = dev_of_node(cdev);
ret = rproc_of_parse_firmware(cdev, 0, &fw_name);
if (ret) {
dev_err(dev, "failed to parse firmware-name property, ret = %d\n",
@@ -1233,13 +1053,66 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
rproc->recovery_disabled = true;
kproc = rproc->priv;
- kproc->cluster = cluster;
- kproc->core = core;
+ kproc->priv = core;
kproc->dev = cdev;
kproc->rproc = rproc;
- core->rproc = rproc;
+ kproc->data = cluster->soc_data->core_data;
+ core->kproc = kproc;
+
+ kproc->ti_sci = devm_ti_sci_get_by_phandle(cdev, "ti,sci");
+ if (IS_ERR(kproc->ti_sci)) {
+ ret = dev_err_probe(cdev, PTR_ERR(kproc->ti_sci),
+ "failed to get ti-sci handle\n");
+ kproc->ti_sci = NULL;
+ goto out;
+ }
+
+ ret = of_property_read_u32(np, "ti,sci-dev-id", &kproc->ti_sci_id);
+ if (ret) {
+ dev_err(cdev, "missing 'ti,sci-dev-id' property\n");
+ goto out;
+ }
- ret = k3_r5_rproc_request_mbox(rproc);
+ kproc->reset = devm_reset_control_get_exclusive(cdev, NULL);
+ if (IS_ERR_OR_NULL(kproc->reset)) {
+ ret = PTR_ERR_OR_ZERO(kproc->reset);
+ if (!ret)
+ ret = -ENODEV;
+ dev_err_probe(cdev, ret, "failed to get reset handle\n");
+ goto out;
+ }
+
+ kproc->tsp = ti_sci_proc_of_get_tsp(cdev, kproc->ti_sci);
+ if (IS_ERR(kproc->tsp)) {
+ ret = dev_err_probe(cdev, PTR_ERR(kproc->tsp),
+ "failed to construct ti-sci proc control\n");
+ goto out;
+ }
+
+ ret = k3_r5_core_of_get_internal_memories(to_platform_device(cdev), kproc);
+ if (ret) {
+ dev_err(cdev, "failed to get internal memories, ret = %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = ti_sci_proc_request(kproc->tsp);
+ if (ret < 0) {
+ dev_err(cdev, "ti_sci_proc_request failed, ret = %d\n", ret);
+ goto out;
+ }
+
+ ret = devm_add_action_or_reset(cdev, k3_release_tsp, kproc->tsp);
+ if (ret)
+ goto out;
+ }
+
+ list_for_each_entry(core, &cluster->cores, elem) {
+ cdev = core->dev;
+ kproc = core->kproc;
+ rproc = kproc->rproc;
+
+ ret = k3_rproc_request_mbox(rproc);
if (ret)
return ret;
@@ -1251,7 +1124,7 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
ret = k3_r5_rproc_configure(kproc);
if (ret) {
- dev_err(dev, "initial configure failed, ret = %d\n",
+ dev_err(cdev, "initial configure failed, ret = %d\n",
ret);
goto out;
}
@@ -1259,16 +1132,16 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
init_rmem:
k3_r5_adjust_tcm_sizes(kproc);
- ret = k3_r5_reserved_mem_init(kproc);
+ ret = k3_reserved_mem_init(kproc);
if (ret) {
- dev_err(dev, "reserved memory init failed, ret = %d\n",
+ dev_err(cdev, "reserved memory init failed, ret = %d\n",
ret);
goto out;
}
- ret = devm_rproc_add(dev, rproc);
+ ret = devm_rproc_add(cdev, rproc);
if (ret) {
- dev_err_probe(dev, ret, "rproc_add failed\n");
+ dev_err_probe(cdev, ret, "rproc_add failed\n");
goto out;
}
@@ -1279,26 +1152,6 @@ init_rmem:
cluster->mode == CLUSTER_MODE_SINGLECPU ||
cluster->mode == CLUSTER_MODE_SINGLECORE)
break;
-
- /*
- * R5 cores require to be powered on sequentially, core0
- * should be in higher power state than core1 in a cluster
- * So, wait for current core to power up before proceeding
- * to next core and put timeout of 2sec for each core.
- *
- * This waiting mechanism is necessary because
- * rproc_auto_boot_callback() for core1 can be called before
- * core0 due to thread execution order.
- */
- ret = wait_event_interruptible_timeout(cluster->core_transition,
- core->released_from_reset,
- msecs_to_jiffies(2000));
- if (ret <= 0) {
- dev_err(dev,
- "Timed out waiting for %s core to power up!\n",
- rproc->name);
- goto out;
- }
}
return 0;
@@ -1317,8 +1170,8 @@ out:
/* undo core0 upon any failures on core1 in split-mode */
if (cluster->mode == CLUSTER_MODE_SPLIT && core == core1) {
core = list_prev_entry(core, elem);
- rproc = core->rproc;
- kproc = rproc->priv;
+ kproc = core->kproc;
+ rproc = kproc->rproc;
goto err_split;
}
return ret;
@@ -1327,7 +1180,7 @@ out:
static void k3_r5_cluster_rproc_exit(void *data)
{
struct k3_r5_cluster *cluster = platform_get_drvdata(data);
- struct k3_r5_rproc *kproc;
+ struct k3_rproc *kproc;
struct k3_r5_core *core;
struct rproc *rproc;
int ret;
@@ -1343,8 +1196,8 @@ static void k3_r5_cluster_rproc_exit(void *data)
list_last_entry(&cluster->cores, struct k3_r5_core, elem);
list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
- rproc = core->rproc;
- kproc = rproc->priv;
+ kproc = core->kproc;
+ rproc = kproc->rproc;
if (rproc->state == RPROC_ATTACHED) {
ret = rproc_detach(rproc);
@@ -1358,142 +1211,6 @@ static void k3_r5_cluster_rproc_exit(void *data)
}
}
-static int k3_r5_core_of_get_internal_memories(struct platform_device *pdev,
- struct k3_r5_core *core)
-{
- static const char * const mem_names[] = {"atcm", "btcm"};
- struct device *dev = &pdev->dev;
- struct resource *res;
- int num_mems;
- int i;
-
- num_mems = ARRAY_SIZE(mem_names);
- core->mem = devm_kcalloc(dev, num_mems, sizeof(*core->mem), GFP_KERNEL);
- if (!core->mem)
- return -ENOMEM;
-
- for (i = 0; i < num_mems; i++) {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- mem_names[i]);
- if (!res) {
- dev_err(dev, "found no memory resource for %s\n",
- mem_names[i]);
- return -EINVAL;
- }
- if (!devm_request_mem_region(dev, res->start,
- resource_size(res),
- dev_name(dev))) {
- dev_err(dev, "could not request %s region for resource\n",
- mem_names[i]);
- return -EBUSY;
- }
-
- /*
- * TCMs are designed in general to support RAM-like backing
- * memories. So, map these as Normal Non-Cached memories. This
- * also avoids/fixes any potential alignment faults due to
- * unaligned data accesses when using memcpy() or memset()
- * functions (normally seen with device type memory).
- */
- core->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
- resource_size(res));
- if (!core->mem[i].cpu_addr) {
- dev_err(dev, "failed to map %s memory\n", mem_names[i]);
- return -ENOMEM;
- }
- core->mem[i].bus_addr = res->start;
-
- /*
- * TODO:
- * The R5F cores can place ATCM & BTCM anywhere in its address
- * based on the corresponding Region Registers in the System
- * Control coprocessor. For now, place ATCM and BTCM at
- * addresses 0 and 0x41010000 (same as the bus address on AM65x
- * SoCs) based on loczrama setting
- */
- if (!strcmp(mem_names[i], "atcm")) {
- core->mem[i].dev_addr = core->loczrama ?
- 0 : K3_R5_TCM_DEV_ADDR;
- } else {
- core->mem[i].dev_addr = core->loczrama ?
- K3_R5_TCM_DEV_ADDR : 0;
- }
- core->mem[i].size = resource_size(res);
-
- dev_dbg(dev, "memory %5s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
- mem_names[i], &core->mem[i].bus_addr,
- core->mem[i].size, core->mem[i].cpu_addr,
- core->mem[i].dev_addr);
- }
- core->num_mems = num_mems;
-
- return 0;
-}
-
-static int k3_r5_core_of_get_sram_memories(struct platform_device *pdev,
- struct k3_r5_core *core)
-{
- struct device_node *np = pdev->dev.of_node;
- struct device *dev = &pdev->dev;
- struct device_node *sram_np;
- struct resource res;
- int num_sram;
- int i, ret;
-
- num_sram = of_property_count_elems_of_size(np, "sram", sizeof(phandle));
- if (num_sram <= 0) {
- dev_dbg(dev, "device does not use reserved on-chip memories, num_sram = %d\n",
- num_sram);
- return 0;
- }
-
- core->sram = devm_kcalloc(dev, num_sram, sizeof(*core->sram), GFP_KERNEL);
- if (!core->sram)
- return -ENOMEM;
-
- for (i = 0; i < num_sram; i++) {
- sram_np = of_parse_phandle(np, "sram", i);
- if (!sram_np)
- return -EINVAL;
-
- if (!of_device_is_available(sram_np)) {
- of_node_put(sram_np);
- return -EINVAL;
- }
-
- ret = of_address_to_resource(sram_np, 0, &res);
- of_node_put(sram_np);
- if (ret)
- return -EINVAL;
-
- core->sram[i].bus_addr = res.start;
- core->sram[i].dev_addr = res.start;
- core->sram[i].size = resource_size(&res);
- core->sram[i].cpu_addr = devm_ioremap_wc(dev, res.start,
- resource_size(&res));
- if (!core->sram[i].cpu_addr) {
- dev_err(dev, "failed to parse and map sram%d memory at %pad\n",
- i, &res.start);
- return -ENOMEM;
- }
-
- dev_dbg(dev, "memory sram%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
- i, &core->sram[i].bus_addr,
- core->sram[i].size, core->sram[i].cpu_addr,
- core->sram[i].dev_addr);
- }
- core->num_sram = num_sram;
-
- return 0;
-}
-
-static void k3_r5_release_tsp(void *data)
-{
- struct ti_sci_proc *tsp = data;
-
- ti_sci_proc_release(tsp);
-}
-
static int k3_r5_core_of_init(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1539,58 +1256,12 @@ static int k3_r5_core_of_init(struct platform_device *pdev)
goto err;
}
- core->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
- if (IS_ERR(core->ti_sci)) {
- ret = dev_err_probe(dev, PTR_ERR(core->ti_sci), "failed to get ti-sci handle\n");
- core->ti_sci = NULL;
- goto err;
- }
-
- ret = of_property_read_u32(np, "ti,sci-dev-id", &core->ti_sci_id);
- if (ret) {
- dev_err(dev, "missing 'ti,sci-dev-id' property\n");
- goto err;
- }
-
- core->reset = devm_reset_control_get_exclusive(dev, NULL);
- if (IS_ERR_OR_NULL(core->reset)) {
- ret = PTR_ERR_OR_ZERO(core->reset);
- if (!ret)
- ret = -ENODEV;
- dev_err_probe(dev, ret, "failed to get reset handle\n");
- goto err;
- }
-
- core->tsp = ti_sci_proc_of_get_tsp(dev, core->ti_sci);
- if (IS_ERR(core->tsp)) {
- ret = dev_err_probe(dev, PTR_ERR(core->tsp),
- "failed to construct ti-sci proc control\n");
- goto err;
- }
-
- ret = k3_r5_core_of_get_internal_memories(pdev, core);
- if (ret) {
- dev_err(dev, "failed to get internal memories, ret = %d\n",
- ret);
- goto err;
- }
-
ret = k3_r5_core_of_get_sram_memories(pdev, core);
if (ret) {
dev_err(dev, "failed to get sram memories, ret = %d\n", ret);
goto err;
}
- ret = ti_sci_proc_request(core->tsp);
- if (ret < 0) {
- dev_err(dev, "ti_sci_proc_request failed, ret = %d\n", ret);
- goto err;
- }
-
- ret = devm_add_action_or_reset(dev, k3_r5_release_tsp, core->tsp);
- if (ret)
- goto err;
-
platform_set_drvdata(pdev, core);
devres_close_group(dev, k3_r5_core_of_init);
@@ -1652,6 +1323,7 @@ static int k3_r5_cluster_of_init(struct platform_device *pdev)
}
core = platform_get_drvdata(cpdev);
+ core->cluster = cluster;
put_device(&cpdev->dev);
list_add_tail(&core->elem, &cluster->cores);
}
@@ -1749,11 +1421,24 @@ static int k3_r5_probe(struct platform_device *pdev)
return 0;
}
+static const struct k3_rproc_mem_data r5_mems[] = {
+ { .name = "atcm", .dev_addr = 0x0 },
+ { .name = "btcm", .dev_addr = K3_R5_TCM_DEV_ADDR },
+};
+
+static const struct k3_rproc_dev_data r5_data = {
+ .mems = r5_mems,
+ .num_mems = ARRAY_SIZE(r5_mems),
+ .boot_align_addr = 0,
+ .uses_lreset = true,
+};
+
static const struct k3_r5_soc_data am65_j721e_soc_data = {
.tcm_is_double = false,
.tcm_ecc_autoinit = false,
.single_cpu_mode = false,
.is_single_core = false,
+ .core_data = &r5_data,
};
static const struct k3_r5_soc_data j7200_j721s2_soc_data = {
@@ -1761,6 +1446,7 @@ static const struct k3_r5_soc_data j7200_j721s2_soc_data = {
.tcm_ecc_autoinit = true,
.single_cpu_mode = false,
.is_single_core = false,
+ .core_data = &r5_data,
};
static const struct k3_r5_soc_data am64_soc_data = {
@@ -1768,6 +1454,7 @@ static const struct k3_r5_soc_data am64_soc_data = {
.tcm_ecc_autoinit = true,
.single_cpu_mode = true,
.is_single_core = false,
+ .core_data = &r5_data,
};
static const struct k3_r5_soc_data am62_soc_data = {
@@ -1775,6 +1462,7 @@ static const struct k3_r5_soc_data am62_soc_data = {
.tcm_ecc_autoinit = true,
.single_cpu_mode = false,
.is_single_core = true,
+ .core_data = &r5_data,
};
static const struct of_device_id k3_r5_of_match[] = {
diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c b/drivers/remoteproc/xlnx_r5_remoteproc.c
index 5aeedeaf3c41..1af89782e116 100644
--- a/drivers/remoteproc/xlnx_r5_remoteproc.c
+++ b/drivers/remoteproc/xlnx_r5_remoteproc.c
@@ -380,6 +380,18 @@ static int zynqmp_r5_rproc_start(struct rproc *rproc)
dev_dbg(r5_core->dev, "RPU boot addr 0x%llx from %s.", rproc->bootaddr,
bootmem == PM_RPU_BOOTMEM_HIVEC ? "OCM" : "TCM");
+ /* Request node before starting RPU core if new version of API is supported */
+ if (zynqmp_pm_feature(PM_REQUEST_NODE) > 1) {
+ ret = zynqmp_pm_request_node(r5_core->pm_domain_id,
+ ZYNQMP_PM_CAPABILITY_ACCESS, 0,
+ ZYNQMP_PM_REQUEST_ACK_BLOCKING);
+ if (ret < 0) {
+ dev_err(r5_core->dev, "failed to request 0x%x\n",
+ r5_core->pm_domain_id);
+ return ret;
+ }
+ }
+
ret = zynqmp_pm_request_wake(r5_core->pm_domain_id, 1,
bootmem, ZYNQMP_PM_REQUEST_ACK_NO);
if (ret)
@@ -401,10 +413,30 @@ static int zynqmp_r5_rproc_stop(struct rproc *rproc)
struct zynqmp_r5_core *r5_core = rproc->priv;
int ret;
+ /* Use release node API to stop core if new version of API is supported */
+ if (zynqmp_pm_feature(PM_RELEASE_NODE) > 1) {
+ ret = zynqmp_pm_release_node(r5_core->pm_domain_id);
+ if (ret)
+ dev_err(r5_core->dev, "failed to stop remoteproc RPU %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Check expected version of EEMI call before calling it. This avoids
+ * any error or warning prints from firmware as it is expected that fw
+ * doesn't support it.
+ */
+ if (zynqmp_pm_feature(PM_FORCE_POWERDOWN) != 1) {
+ dev_dbg(r5_core->dev, "EEMI interface %d ver 1 not supported\n",
+ PM_FORCE_POWERDOWN);
+ return -EOPNOTSUPP;
+ }
+
+ /* maintain force pwr down for backward compatibility */
ret = zynqmp_pm_force_pwrdwn(r5_core->pm_domain_id,
ZYNQMP_PM_REQUEST_ACK_BLOCKING);
if (ret)
- dev_err(r5_core->dev, "failed to stop remoteproc RPU %d\n", ret);
+ dev_err(r5_core->dev, "core force power down failed\n");
return ret;
}
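The Xilinx change is a firmware feature negotiation: query the EEMI API version first, prefer the node-based request/release calls when the firmware reports a version above 1, and keep the legacy force-powerdown path only for version-1 firmware. A condensed sketch of the stop-side decision, assuming a hypothetical example_stop() wrapper around the calls named in this hunk:

	static int example_stop(struct zynqmp_r5_core *r5_core)
	{
		/* newer firmware: releasing the PM node stops the core */
		if (zynqmp_pm_feature(PM_RELEASE_NODE) > 1)
			return zynqmp_pm_release_node(r5_core->pm_domain_id);

		/* version must be checked to avoid firmware error prints */
		if (zynqmp_pm_feature(PM_FORCE_POWERDOWN) != 1)
			return -EOPNOTSUPP;

		/* legacy firmware: force power-down of the core */
		return zynqmp_pm_force_pwrdwn(r5_core->pm_domain_id,
					      ZYNQMP_PM_REQUEST_ACK_BLOCKING);
	}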
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 99f6f9784e68..d85be5899da6 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -225,6 +225,13 @@ config RESET_RZG2L_USBPHY_CTRL
Support for USBPHY Control found on RZ/G2L family. It mainly
controls reset and power down of the USB/PHY.
+config RESET_RZV2H_USB2PHY
+ tristate "Renesas RZ/V2H(P) (and similar SoCs) USB2PHY Reset driver"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ help
+ Support for the USB2PHY port reset control found on the RZ/V2H(P) SoC
+ (and similar SoCs).
+
config RESET_SCMI
tristate "Reset driver controlled via ARM SCMI interface"
depends on ARM_SCMI_PROTOCOL || COMPILE_TEST
@@ -279,6 +286,16 @@ config RESET_SUNXI
help
This enables the reset driver for Allwinner SoCs.
+config RESET_TH1520
+ tristate "T-HEAD 1520 reset controller"
+ depends on ARCH_THEAD || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ This driver provides support for the T-HEAD TH1520 SoC reset controller,
+ which manages hardware reset lines for SoC components such as the GPU.
+ Enable this option if you need to control hardware resets on TH1520-based
+ systems.
+
config RESET_TI_SCI
tristate "TI System Control Interface (TI-SCI) reset driver"
depends on TI_SCI_PROTOCOL || (COMPILE_TEST && TI_SCI_PROTOCOL=n)
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index 31f9904d13f9..91e6348e3351 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -31,11 +31,13 @@ obj-$(CONFIG_RESET_QCOM_AOSS) += reset-qcom-aoss.o
obj-$(CONFIG_RESET_QCOM_PDC) += reset-qcom-pdc.o
obj-$(CONFIG_RESET_RASPBERRYPI) += reset-raspberrypi.o
obj-$(CONFIG_RESET_RZG2L_USBPHY_CTRL) += reset-rzg2l-usbphy-ctrl.o
+obj-$(CONFIG_RESET_RZV2H_USB2PHY) += reset-rzv2h-usb2phy.o
obj-$(CONFIG_RESET_SCMI) += reset-scmi.o
obj-$(CONFIG_RESET_SIMPLE) += reset-simple.o
obj-$(CONFIG_RESET_SOCFPGA) += reset-socfpga.o
obj-$(CONFIG_RESET_SUNPLUS) += reset-sunplus.o
obj-$(CONFIG_RESET_SUNXI) += reset-sunxi.o
+obj-$(CONFIG_RESET_TH1520) += reset-th1520.o
obj-$(CONFIG_RESET_TI_SCI) += reset-ti-sci.o
obj-$(CONFIG_RESET_TI_SYSCON) += reset-ti-syscon.o
obj-$(CONFIG_RESET_TI_TPS380X) += reset-tps380x.o
diff --git a/drivers/reset/reset-rzv2h-usb2phy.c b/drivers/reset/reset-rzv2h-usb2phy.c
new file mode 100644
index 000000000000..ae643575b067
--- /dev/null
+++ b/drivers/reset/reset-rzv2h-usb2phy.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/V2H(P) USB2PHY Port reset control driver
+ *
+ * Copyright (C) 2025 Renesas Electronics Corporation
+ */
+
+#include <linux/cleanup.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/reset-controller.h>
+
+struct rzv2h_usb2phy_regval {
+ u16 reg;
+ u16 val;
+};
+
+struct rzv2h_usb2phy_reset_of_data {
+ const struct rzv2h_usb2phy_regval *init_vals;
+ unsigned int init_val_count;
+
+ u16 reset_reg;
+ u16 reset_assert_val;
+ u16 reset_deassert_val;
+ u16 reset_status_bits;
+ u16 reset_release_val;
+
+ u16 reset2_reg;
+ u16 reset2_acquire_val;
+ u16 reset2_release_val;
+};
+
+struct rzv2h_usb2phy_reset_priv {
+ const struct rzv2h_usb2phy_reset_of_data *data;
+ void __iomem *base;
+ struct device *dev;
+ struct reset_controller_dev rcdev;
+ spinlock_t lock; /* protects register accesses */
+};
+
+static inline struct rzv2h_usb2phy_reset_priv
+*rzv2h_usbphy_rcdev_to_priv(struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct rzv2h_usb2phy_reset_priv, rcdev);
+}
+
+/* This function must be called only after pm_runtime_resume_and_get() has been called */
+static void rzv2h_usbphy_assert_helper(struct rzv2h_usb2phy_reset_priv *priv)
+{
+ const struct rzv2h_usb2phy_reset_of_data *data = priv->data;
+
+ scoped_guard(spinlock, &priv->lock) {
+ writel(data->reset2_acquire_val, priv->base + data->reset2_reg);
+ writel(data->reset_assert_val, priv->base + data->reset_reg);
+ }
+
+ usleep_range(11, 20);
+}
+
+static int rzv2h_usbphy_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct rzv2h_usb2phy_reset_priv *priv = rzv2h_usbphy_rcdev_to_priv(rcdev);
+ struct device *dev = priv->dev;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret) {
+ dev_err(dev, "pm_runtime_resume_and_get failed\n");
+ return ret;
+ }
+
+ rzv2h_usbphy_assert_helper(priv);
+
+ pm_runtime_put(dev);
+
+ return 0;
+}
+
+static int rzv2h_usbphy_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct rzv2h_usb2phy_reset_priv *priv = rzv2h_usbphy_rcdev_to_priv(rcdev);
+ const struct rzv2h_usb2phy_reset_of_data *data = priv->data;
+ struct device *dev = priv->dev;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret) {
+ dev_err(dev, "pm_runtime_resume_and_get failed\n");
+ return ret;
+ }
+
+ scoped_guard(spinlock, &priv->lock) {
+ writel(data->reset_deassert_val, priv->base + data->reset_reg);
+ writel(data->reset2_release_val, priv->base + data->reset2_reg);
+ writel(data->reset_release_val, priv->base + data->reset_reg);
+ }
+
+ pm_runtime_put(dev);
+
+ return 0;
+}
+
+static int rzv2h_usbphy_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct rzv2h_usb2phy_reset_priv *priv = rzv2h_usbphy_rcdev_to_priv(rcdev);
+ struct device *dev = priv->dev;
+ int ret;
+ u32 reg;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret) {
+ dev_err(dev, "pm_runtime_resume_and_get failed\n");
+ return ret;
+ }
+
+ reg = readl(priv->base + priv->data->reset_reg);
+
+ pm_runtime_put(dev);
+
+ return (reg & priv->data->reset_status_bits) == priv->data->reset_status_bits;
+}
+
+static const struct reset_control_ops rzv2h_usbphy_reset_ops = {
+ .assert = rzv2h_usbphy_reset_assert,
+ .deassert = rzv2h_usbphy_reset_deassert,
+ .status = rzv2h_usbphy_reset_status,
+};
+
+static int rzv2h_usb2phy_reset_of_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ /* No special handling needed; there is only one reset line per device */
+ return 0;
+}
+
+static int rzv2h_usb2phy_reset_probe(struct platform_device *pdev)
+{
+ const struct rzv2h_usb2phy_reset_of_data *data;
+ struct rzv2h_usb2phy_reset_priv *priv;
+ struct device *dev = &pdev->dev;
+ struct reset_control *rstc;
+ int error;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ data = of_device_get_match_data(dev);
+ priv->data = data;
+ priv->dev = dev;
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ rstc = devm_reset_control_get_shared_deasserted(dev, NULL);
+ if (IS_ERR(rstc))
+ return dev_err_probe(dev, PTR_ERR(rstc),
+ "failed to get deasserted reset\n");
+
+ spin_lock_init(&priv->lock);
+
+ error = devm_pm_runtime_enable(dev);
+ if (error)
+ return dev_err_probe(dev, error, "Failed to enable pm_runtime\n");
+
+ error = pm_runtime_resume_and_get(dev);
+ if (error)
+ return dev_err_probe(dev, error, "pm_runtime_resume_and_get failed\n");
+
+ for (unsigned int i = 0; i < data->init_val_count; i++)
+ writel(data->init_vals[i].val, priv->base + data->init_vals[i].reg);
+
+ /* keep usb2phy in asserted state */
+ rzv2h_usbphy_assert_helper(priv);
+
+ pm_runtime_put(dev);
+
+ priv->rcdev.ops = &rzv2h_usbphy_reset_ops;
+ priv->rcdev.of_reset_n_cells = 0;
+ priv->rcdev.nr_resets = 1;
+ priv->rcdev.of_xlate = rzv2h_usb2phy_reset_of_xlate;
+ priv->rcdev.of_node = dev->of_node;
+ priv->rcdev.dev = dev;
+
+ return devm_reset_controller_register(dev, &priv->rcdev);
+}
+
+/*
+ * Initialization values required to prepare the PHY to accept
+ * assert and deassert requests.
+ */
+static const struct rzv2h_usb2phy_regval rzv2h_init_vals[] = {
+ { .reg = 0xc10, .val = 0x67c },
+ { .reg = 0xc14, .val = 0x1f },
+ { .reg = 0x600, .val = 0x909 },
+};
+
+static const struct rzv2h_usb2phy_reset_of_data rzv2h_reset_of_data = {
+ .init_vals = rzv2h_init_vals,
+ .init_val_count = ARRAY_SIZE(rzv2h_init_vals),
+ .reset_reg = 0,
+ .reset_assert_val = 0x206,
+ .reset_status_bits = BIT(2),
+ .reset_deassert_val = 0x200,
+ .reset_release_val = 0x0,
+ .reset2_reg = 0xb04,
+ .reset2_acquire_val = 0x303,
+ .reset2_release_val = 0x3,
+};
+
+static const struct of_device_id rzv2h_usb2phy_reset_of_match[] = {
+ { .compatible = "renesas,r9a09g057-usb2phy-reset", .data = &rzv2h_reset_of_data },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rzv2h_usb2phy_reset_of_match);
+
+static struct platform_driver rzv2h_usb2phy_reset_driver = {
+ .driver = {
+ .name = "rzv2h_usb2phy_reset",
+ .of_match_table = rzv2h_usb2phy_reset_of_match,
+ },
+ .probe = rzv2h_usb2phy_reset_probe,
+};
+module_platform_driver(rzv2h_usb2phy_reset_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>");
+MODULE_DESCRIPTION("Renesas RZ/V2H(P) USB2PHY Control");
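Consumers see this controller through the standard reset API; the of_xlate above always returns index 0, so device-tree users pass no argument cells. A minimal consumer sketch, assuming a hypothetical USB2 host driver bound to a node referencing this reset controller (names are illustrative, not from an in-tree driver):

#include <linux/reset.h>

static int example_usb2_host_init(struct device *dev)
{
	struct reset_control *rstc;

	/* #reset-cells = <0>, so no index is passed in the phandle */
	rstc = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

	/*
	 * Runs the three-write deassert sequence above:
	 * reset_deassert_val, reset2_release_val, reset_release_val.
	 */
	return reset_control_deassert(rstc);
}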
diff --git a/drivers/reset/reset-th1520.c b/drivers/reset/reset-th1520.c
new file mode 100644
index 000000000000..7874f0693e1b
--- /dev/null
+++ b/drivers/reset/reset-th1520.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024 Samsung Electronics Co., Ltd.
+ * Author: Michal Wilczynski <m.wilczynski@samsung.com>
+ */
+
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/reset/thead,th1520-reset.h>
+
+/* Register offset in VOSYS_REGMAP */
+#define TH1520_GPU_RST_CFG 0x0
+#define TH1520_GPU_RST_CFG_MASK GENMASK(1, 0)
+
+/* register values */
+#define TH1520_GPU_SW_GPU_RST BIT(0)
+#define TH1520_GPU_SW_CLKGEN_RST BIT(1)
+
+struct th1520_reset_priv {
+ struct reset_controller_dev rcdev;
+ struct regmap *map;
+};
+
+struct th1520_reset_map {
+ u32 bit;
+ u32 reg;
+};
+
+static const struct th1520_reset_map th1520_resets[] = {
+ [TH1520_RESET_ID_GPU] = {
+ .bit = TH1520_GPU_SW_GPU_RST,
+ .reg = TH1520_GPU_RST_CFG,
+ },
+ [TH1520_RESET_ID_GPU_CLKGEN] = {
+ .bit = TH1520_GPU_SW_CLKGEN_RST,
+ .reg = TH1520_GPU_RST_CFG,
+ }
+};
+
+static inline struct th1520_reset_priv *
+to_th1520_reset(struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct th1520_reset_priv, rcdev);
+}
+
+static int th1520_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct th1520_reset_priv *priv = to_th1520_reset(rcdev);
+ const struct th1520_reset_map *reset;
+
+ reset = &th1520_resets[id];
+
+ return regmap_update_bits(priv->map, reset->reg, reset->bit, 0);
+}
+
+static int th1520_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct th1520_reset_priv *priv = to_th1520_reset(rcdev);
+ const struct th1520_reset_map *reset;
+
+ reset = &th1520_resets[id];
+
+ return regmap_update_bits(priv->map, reset->reg, reset->bit,
+ reset->bit);
+}
+
+static const struct reset_control_ops th1520_reset_ops = {
+ .assert = th1520_reset_assert,
+ .deassert = th1520_reset_deassert,
+};
+
+static const struct regmap_config th1520_reset_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .fast_io = true,
+};
+
+static int th1520_reset_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct th1520_reset_priv *priv;
+ void __iomem *base;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ priv->map = devm_regmap_init_mmio(dev, base,
+ &th1520_reset_regmap_config);
+ if (IS_ERR(priv->map))
+ return PTR_ERR(priv->map);
+
+ /* Initialize GPU resets to asserted state */
+ ret = regmap_update_bits(priv->map, TH1520_GPU_RST_CFG,
+ TH1520_GPU_RST_CFG_MASK, 0);
+ if (ret)
+ return ret;
+
+ priv->rcdev.owner = THIS_MODULE;
+ priv->rcdev.nr_resets = ARRAY_SIZE(th1520_resets);
+ priv->rcdev.ops = &th1520_reset_ops;
+ priv->rcdev.of_node = dev->of_node;
+
+ return devm_reset_controller_register(dev, &priv->rcdev);
+}
+
+static const struct of_device_id th1520_reset_match[] = {
+ { .compatible = "thead,th1520-reset" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, th1520_reset_match);
+
+static struct platform_driver th1520_reset_driver = {
+ .driver = {
+ .name = "th1520-reset",
+ .of_match_table = th1520_reset_match,
+ },
+ .probe = th1520_reset_probe,
+};
+module_platform_driver(th1520_reset_driver);
+
+MODULE_AUTHOR("Michal Wilczynski <m.wilczynski@samsung.com>");
+MODULE_DESCRIPTION("T-HEAD TH1520 SoC reset controller");
+MODULE_LICENSE("GPL");
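A consumer-side sketch for the two GPU resets, assuming a hypothetical GPU driver whose node lists both lines with reset-names "gpu" and "gpu-clkgen" (the names and the bring-up order are assumptions, not taken from an in-tree user):

#include <linux/reset.h>

static int example_th1520_gpu_power_up(struct device *dev)
{
	struct reset_control *gpu_rst, *clkgen_rst;
	int ret;

	gpu_rst = devm_reset_control_get_exclusive(dev, "gpu");
	if (IS_ERR(gpu_rst))
		return PTR_ERR(gpu_rst);

	clkgen_rst = devm_reset_control_get_exclusive(dev, "gpu-clkgen");
	if (IS_ERR(clkgen_rst))
		return PTR_ERR(clkgen_rst);

	/* Release the clock generator before the GPU core (assumed order) */
	ret = reset_control_deassert(clkgen_rst);
	if (ret)
		return ret;

	ret = reset_control_deassert(gpu_rst);
	if (ret)
		reset_control_assert(clkgen_rst);

	return ret;
}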
diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
index 40d386809d6b..87c944d4b4f3 100644
--- a/drivers/rpmsg/qcom_smd.c
+++ b/drivers/rpmsg/qcom_smd.c
@@ -746,7 +746,7 @@ static int __qcom_smd_send(struct qcom_smd_channel *channel, const void *data,
__le32 hdr[5] = { cpu_to_le32(len), };
int tlen = sizeof(hdr) + len;
unsigned long flags;
- int ret;
+ int ret = 0;
/* Word aligned channels only accept word size aligned data */
if (channel->info_word && len % 4)
@@ -1369,7 +1369,8 @@ static int qcom_smd_parse_edge(struct device *dev,
edge->mbox_chan = mbox_request_channel(&edge->mbox_client, 0);
if (IS_ERR(edge->mbox_chan)) {
if (PTR_ERR(edge->mbox_chan) != -ENODEV) {
- ret = PTR_ERR(edge->mbox_chan);
+ ret = dev_err_probe(dev, PTR_ERR(edge->mbox_chan),
+ "failed to acquire IPC mailbox\n");
goto put_node;
}
@@ -1386,6 +1387,7 @@ static int qcom_smd_parse_edge(struct device *dev,
of_node_put(syscon_np);
if (IS_ERR(edge->ipc_regmap)) {
ret = PTR_ERR(edge->ipc_regmap);
+ dev_err(dev, "failed to get regmap from syscon: %d\n", ret);
goto put_node;
}
@@ -1501,10 +1503,8 @@ struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent,
}
ret = qcom_smd_parse_edge(&edge->dev, node, edge);
- if (ret) {
- dev_err(&edge->dev, "failed to parse smd edge\n");
+ if (ret)
goto unregister_dev;
- }
ret = qcom_smd_create_chrdev(edge);
if (ret) {
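The switch to dev_err_probe() above both logs the failure and returns the error code in one step, while staying silent for -EPROBE_DEFER so deferred probes do not spam the log. The general shape of the idiom, as a sketch (the helper name is illustrative):

#include <linux/device.h>
#include <linux/mailbox_client.h>

static int example_get_mbox(struct device *dev, struct mbox_client *cl,
			    struct mbox_chan **out)
{
	struct mbox_chan *chan = mbox_request_channel(cl, 0);

	if (IS_ERR(chan))
		/* Logs at err level; quiet for -EPROBE_DEFER */
		return dev_err_probe(dev, PTR_ERR(chan),
				     "failed to acquire IPC mailbox\n");

	*out = chan;
	return 0;
}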
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index 207b64c0a2fe..6ee36adcbdba 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -194,38 +194,6 @@ int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst)
EXPORT_SYMBOL(rpmsg_sendto);
/**
- * rpmsg_send_offchannel() - send a message using explicit src/dst addresses
- * @ept: the rpmsg endpoint
- * @src: source address
- * @dst: destination address
- * @data: payload of message
- * @len: length of payload
- *
- * This function sends @data of length @len to the remote @dst address,
- * and uses @src as the source address.
- * The message will be sent to the remote processor which the @ept
- * endpoint belongs to.
- * In case there are no TX buffers available, the function will block until
- * one becomes available, or a timeout of 15 seconds elapses. When the latter
- * happens, -ERESTARTSYS is returned.
- *
- * Can only be called from process context (for now).
- *
- * Return: 0 on success and an appropriate error value on failure.
- */
-int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
- void *data, int len)
-{
- if (WARN_ON(!ept))
- return -EINVAL;
- if (!ept->ops->send_offchannel)
- return -ENXIO;
-
- return ept->ops->send_offchannel(ept, src, dst, data, len);
-}
-EXPORT_SYMBOL(rpmsg_send_offchannel);
-
-/**
* rpmsg_trysend() - send a message across to the remote processor
* @ept: the rpmsg endpoint
* @data: payload of message
@@ -302,37 +270,6 @@ __poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
EXPORT_SYMBOL(rpmsg_poll);
/**
- * rpmsg_trysend_offchannel() - send a message using explicit src/dst addresses
- * @ept: the rpmsg endpoint
- * @src: source address
- * @dst: destination address
- * @data: payload of message
- * @len: length of payload
- *
- * This function sends @data of length @len to the remote @dst address,
- * and uses @src as the source address.
- * The message will be sent to the remote processor which the @ept
- * endpoint belongs to.
- * In case there are no TX buffers available, the function will immediately
- * return -ENOMEM without waiting until one becomes available.
- *
- * Can only be called from process context (for now).
- *
- * Return: 0 on success and an appropriate error value on failure.
- */
-int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
- void *data, int len)
-{
- if (WARN_ON(!ept))
- return -EINVAL;
- if (!ept->ops->trysend_offchannel)
- return -ENXIO;
-
- return ept->ops->trysend_offchannel(ept, src, dst, data, len);
-}
-EXPORT_SYMBOL(rpmsg_trysend_offchannel);
-
-/**
* rpmsg_set_flow_control() - request remote to pause/resume transmission
* @ept: the rpmsg endpoint
* @pause: pause transmission
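With the offchannel variants gone, any remaining caller addresses the remote side by destination only; the endpoint's own address is used as the implicit source. A migration sketch (remote_addr, buf and len are illustrative):

/* Before (removed API): explicit source and destination */
ret = rpmsg_send_offchannel(ept, ept->addr, remote_addr, buf, len);

/* After: the source is taken from the endpoint itself */
ret = rpmsg_sendto(ept, buf, len, remote_addr);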
diff --git a/drivers/rpmsg/rpmsg_internal.h b/drivers/rpmsg/rpmsg_internal.h
index 42c7007be1b5..397e4926bd02 100644
--- a/drivers/rpmsg/rpmsg_internal.h
+++ b/drivers/rpmsg/rpmsg_internal.h
@@ -50,10 +50,8 @@ struct rpmsg_device_ops {
* @destroy_ept: see @rpmsg_destroy_ept(), required
* @send: see @rpmsg_send(), required
* @sendto: see @rpmsg_sendto(), optional
- * @send_offchannel: see @rpmsg_send_offchannel(), optional
* @trysend: see @rpmsg_trysend(), required
* @trysendto: see @rpmsg_trysendto(), optional
- * @trysend_offchannel: see @rpmsg_trysend_offchannel(), optional
* @poll: see @rpmsg_poll(), optional
* @set_flow_control: see @rpmsg_set_flow_control(), optional
* @get_mtu: see @rpmsg_get_mtu(), optional
@@ -67,13 +65,9 @@ struct rpmsg_endpoint_ops {
int (*send)(struct rpmsg_endpoint *ept, void *data, int len);
int (*sendto)(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
- int (*send_offchannel)(struct rpmsg_endpoint *ept, u32 src, u32 dst,
- void *data, int len);
int (*trysend)(struct rpmsg_endpoint *ept, void *data, int len);
int (*trysendto)(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
- int (*trysend_offchannel)(struct rpmsg_endpoint *ept, u32 src, u32 dst,
- void *data, int len);
__poll_t (*poll)(struct rpmsg_endpoint *ept, struct file *filp,
poll_table *wait);
int (*set_flow_control)(struct rpmsg_endpoint *ept, bool pause, u32 dst);
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 89d7a3b8c48b..4730b1c8b322 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -141,13 +141,9 @@ static void virtio_rpmsg_destroy_ept(struct rpmsg_endpoint *ept);
static int virtio_rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len);
static int virtio_rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len,
u32 dst);
-static int virtio_rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src,
- u32 dst, void *data, int len);
static int virtio_rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len);
static int virtio_rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data,
int len, u32 dst);
-static int virtio_rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src,
- u32 dst, void *data, int len);
static ssize_t virtio_rpmsg_get_mtu(struct rpmsg_endpoint *ept);
static struct rpmsg_device *__rpmsg_create_channel(struct virtproc_info *vrp,
struct rpmsg_channel_info *chinfo);
@@ -156,10 +152,8 @@ static const struct rpmsg_endpoint_ops virtio_endpoint_ops = {
.destroy_ept = virtio_rpmsg_destroy_ept,
.send = virtio_rpmsg_send,
.sendto = virtio_rpmsg_sendto,
- .send_offchannel = virtio_rpmsg_send_offchannel,
.trysend = virtio_rpmsg_trysend,
.trysendto = virtio_rpmsg_trysendto,
- .trysend_offchannel = virtio_rpmsg_trysend_offchannel,
.get_mtu = virtio_rpmsg_get_mtu,
};
@@ -545,7 +539,7 @@ static void rpmsg_downref_sleepers(struct virtproc_info *vrp)
* the function will immediately fail, and -ENOMEM will be returned.
*
* Normally drivers shouldn't use this function directly; instead, drivers
- * should use the appropriate rpmsg_{try}send{to, _offchannel} API
+ * should use the appropriate rpmsg_{try}send{,to}() API
* (see include/linux/rpmsg.h).
*
* Return: 0 on success and an appropriate error value on failure.
@@ -665,14 +659,6 @@ static int virtio_rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len,
return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, true);
}
-static int virtio_rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src,
- u32 dst, void *data, int len)
-{
- struct rpmsg_device *rpdev = ept->rpdev;
-
- return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, true);
-}
-
static int virtio_rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len)
{
struct rpmsg_device *rpdev = ept->rpdev;
@@ -690,14 +676,6 @@ static int virtio_rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data,
return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, false);
}
-static int virtio_rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src,
- u32 dst, void *data, int len)
-{
- struct rpmsg_device *rpdev = ept->rpdev;
-
- return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, false);
-}
-
static ssize_t virtio_rpmsg_get_mtu(struct rpmsg_endpoint *ept)
{
struct rpmsg_device *rpdev = ept->rpdev;
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index dac85294d2f5..e284eea331d7 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -255,7 +255,7 @@ static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
/*
* The recording commands needs to be called with option QID
- * for guests that have previlege classes A or B.
+ * for guests that have privilege classes A or B.
* Purging has to be done as separate step, because recording
* can't be switched on as long as records are on the queue.
* Doing both at the same time doesn't work.
@@ -557,7 +557,7 @@ static ssize_t vmlogrdr_purge_store(struct device * dev,
/*
* The recording command needs to be called with option QID
- * for guests that have previlege classes A or B.
+ * for guests that have privilege classes A or B.
* Other guests will not recognize the command and we have to
* issue the same command without the QID parameter.
*/
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index 6a8daeb8c4b9..a2d65adffb80 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -23,11 +23,13 @@ source "drivers/soc/qcom/Kconfig"
source "drivers/soc/renesas/Kconfig"
source "drivers/soc/rockchip/Kconfig"
source "drivers/soc/samsung/Kconfig"
+source "drivers/soc/sophgo/Kconfig"
source "drivers/soc/sunxi/Kconfig"
source "drivers/soc/tegra/Kconfig"
source "drivers/soc/ti/Kconfig"
source "drivers/soc/ux500/Kconfig"
source "drivers/soc/versatile/Kconfig"
+source "drivers/soc/vt8500/Kconfig"
source "drivers/soc/xilinx/Kconfig"
endmenu
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 2037a8695cb2..c9e689080ceb 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -29,9 +29,11 @@ obj-y += qcom/
obj-y += renesas/
obj-y += rockchip/
obj-$(CONFIG_SOC_SAMSUNG) += samsung/
+obj-y += sophgo/
obj-y += sunxi/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-y += ti/
obj-$(CONFIG_ARCH_U8500) += ux500/
obj-y += versatile/
+obj-y += vt8500/
obj-y += xilinx/
diff --git a/drivers/soc/amlogic/meson-clk-measure.c b/drivers/soc/amlogic/meson-clk-measure.c
index a6453ffeb753..d862e30a244e 100644
--- a/drivers/soc/amlogic/meson-clk-measure.c
+++ b/drivers/soc/amlogic/meson-clk-measure.c
@@ -14,11 +14,6 @@
static DEFINE_MUTEX(measure_lock);
-#define MSR_CLK_DUTY 0x0
-#define MSR_CLK_REG0 0x4
-#define MSR_CLK_REG1 0x8
-#define MSR_CLK_REG2 0xc
-
#define MSR_DURATION GENMASK(15, 0)
#define MSR_ENABLE BIT(16)
#define MSR_CONT BIT(17) /* continuous measurement */
@@ -33,23 +28,34 @@ static DEFINE_MUTEX(measure_lock);
#define DIV_STEP 32
#define DIV_MAX 640
-#define CLK_MSR_MAX 128
-
struct meson_msr_id {
struct meson_msr *priv;
unsigned int id;
const char *name;
};
+struct msr_reg_offset {
+ unsigned int duty_val;
+ unsigned int freq_ctrl;
+ unsigned int duty_ctrl;
+ unsigned int freq_val;
+};
+
+struct meson_msr_data {
+ struct meson_msr_id *msr_table;
+ unsigned int msr_count;
+ const struct msr_reg_offset *reg;
+};
+
struct meson_msr {
struct regmap *regmap;
- struct meson_msr_id msr_table[CLK_MSR_MAX];
+ struct meson_msr_data data;
};
#define CLK_MSR_ID(__id, __name) \
[__id] = {.id = __id, .name = __name,}
-static struct meson_msr_id clk_msr_m8[CLK_MSR_MAX] = {
+static const struct meson_msr_id clk_msr_m8[] = {
CLK_MSR_ID(0, "ring_osc_out_ee0"),
CLK_MSR_ID(1, "ring_osc_out_ee1"),
CLK_MSR_ID(2, "ring_osc_out_ee2"),
@@ -98,7 +104,7 @@ static struct meson_msr_id clk_msr_m8[CLK_MSR_MAX] = {
CLK_MSR_ID(63, "mipi_csi_cfg"),
};
-static struct meson_msr_id clk_msr_gx[CLK_MSR_MAX] = {
+static const struct meson_msr_id clk_msr_gx[] = {
CLK_MSR_ID(0, "ring_osc_out_ee_0"),
CLK_MSR_ID(1, "ring_osc_out_ee_1"),
CLK_MSR_ID(2, "ring_osc_out_ee_2"),
@@ -168,7 +174,7 @@ static struct meson_msr_id clk_msr_gx[CLK_MSR_MAX] = {
CLK_MSR_ID(82, "ge2d"),
};
-static struct meson_msr_id clk_msr_axg[CLK_MSR_MAX] = {
+static const struct meson_msr_id clk_msr_axg[] = {
CLK_MSR_ID(0, "ring_osc_out_ee_0"),
CLK_MSR_ID(1, "ring_osc_out_ee_1"),
CLK_MSR_ID(2, "ring_osc_out_ee_2"),
@@ -242,7 +248,7 @@ static struct meson_msr_id clk_msr_axg[CLK_MSR_MAX] = {
CLK_MSR_ID(109, "audio_locker_in"),
};
-static struct meson_msr_id clk_msr_g12a[CLK_MSR_MAX] = {
+static const struct meson_msr_id clk_msr_g12a[] = {
CLK_MSR_ID(0, "ring_osc_out_ee_0"),
CLK_MSR_ID(1, "ring_osc_out_ee_1"),
CLK_MSR_ID(2, "ring_osc_out_ee_2"),
@@ -358,7 +364,7 @@ static struct meson_msr_id clk_msr_g12a[CLK_MSR_MAX] = {
CLK_MSR_ID(122, "audio_pdm_dclk"),
};
-static struct meson_msr_id clk_msr_sm1[CLK_MSR_MAX] = {
+static const struct meson_msr_id clk_msr_sm1[] = {
CLK_MSR_ID(0, "ring_osc_out_ee_0"),
CLK_MSR_ID(1, "ring_osc_out_ee_1"),
CLK_MSR_ID(2, "ring_osc_out_ee_2"),
@@ -488,10 +494,304 @@ static struct meson_msr_id clk_msr_sm1[CLK_MSR_MAX] = {
CLK_MSR_ID(127, "csi2_data"),
};
+static const struct meson_msr_id clk_msr_c3[] = {
+ CLK_MSR_ID(0, "sys_clk"),
+ CLK_MSR_ID(1, "axi_clk"),
+ CLK_MSR_ID(2, "rtc_clk"),
+ CLK_MSR_ID(3, "p20_usb2_ckout"),
+ CLK_MSR_ID(4, "eth_mpll_test"),
+ CLK_MSR_ID(5, "sys_pll"),
+ CLK_MSR_ID(6, "cpu_clk_div16"),
+ CLK_MSR_ID(7, "ts_pll"),
+ CLK_MSR_ID(8, "fclk_div2"),
+ CLK_MSR_ID(9, "fclk_div2p5"),
+ CLK_MSR_ID(10, "fclk_div3"),
+ CLK_MSR_ID(11, "fclk_div4"),
+ CLK_MSR_ID(12, "fclk_div5"),
+ CLK_MSR_ID(13, "fclk_div7"),
+ CLK_MSR_ID(15, "fclk_50m"),
+ CLK_MSR_ID(16, "sys_oscin32k_i"),
+ CLK_MSR_ID(17, "mclk_pll"),
+ CLK_MSR_ID(19, "hifi_pll"),
+ CLK_MSR_ID(20, "gp0_pll"),
+ CLK_MSR_ID(21, "gp1_pll"),
+ CLK_MSR_ID(22, "eth_mppll_50m_ckout"),
+ CLK_MSR_ID(23, "sys_pll_div16"),
+ CLK_MSR_ID(24, "ddr_dpll_pt_clk"),
+ CLK_MSR_ID(26, "nna_core"),
+ CLK_MSR_ID(27, "rtc_sec_pulse_out"),
+ CLK_MSR_ID(28, "rtc_osc_clk_out"),
+ CLK_MSR_ID(29, "debug_in_clk"),
+ CLK_MSR_ID(30, "mod_eth_phy_ref_clk"),
+ CLK_MSR_ID(31, "mod_eth_tx_clk"),
+ CLK_MSR_ID(32, "eth_125m"),
+ CLK_MSR_ID(33, "eth_rmii"),
+ CLK_MSR_ID(34, "co_clkin_to_mac"),
+ CLK_MSR_ID(36, "co_rx_clk"),
+ CLK_MSR_ID(37, "co_tx_clk"),
+ CLK_MSR_ID(38, "eth_phy_rxclk"),
+ CLK_MSR_ID(39, "eth_phy_plltxclk"),
+ CLK_MSR_ID(40, "ephy_test_clk"),
+ CLK_MSR_ID(66, "vapb"),
+ CLK_MSR_ID(67, "ge2d"),
+ CLK_MSR_ID(68, "dewarpa"),
+ CLK_MSR_ID(70, "mipi_dsi_meas"),
+ CLK_MSR_ID(71, "dsi_phy"),
+ CLK_MSR_ID(79, "rama"),
+ CLK_MSR_ID(94, "vc9000e_core"),
+ CLK_MSR_ID(95, "vc9000e_sys"),
+ CLK_MSR_ID(96, "vc9000e_aclk"),
+ CLK_MSR_ID(97, "hcodec"),
+ CLK_MSR_ID(106, "deskew_pll_clk_div32_out"),
+ CLK_MSR_ID(107, "mipi_csi_phy_clk_out[0]"),
+ CLK_MSR_ID(108, "mipi_csi_phy_clk_out[1]"),
+ CLK_MSR_ID(110, "spifc"),
+ CLK_MSR_ID(111, "saradc"),
+ CLK_MSR_ID(112, "ts"),
+ CLK_MSR_ID(113, "sd_emmc_c"),
+ CLK_MSR_ID(114, "sd_emmc_b"),
+ CLK_MSR_ID(115, "sd_emmc_a"),
+ CLK_MSR_ID(116, "gpio_msr_clk"),
+ CLK_MSR_ID(117, "spicc_b"),
+ CLK_MSR_ID(118, "spicc_a"),
+ CLK_MSR_ID(122, "mod_audio_pdm_dclk_o"),
+ CLK_MSR_ID(124, "o_earcrx_dmac_clk"),
+ CLK_MSR_ID(125, "o_earcrx_cmdc_clk"),
+ CLK_MSR_ID(126, "o_earctx_dmac_clk"),
+ CLK_MSR_ID(127, "o_earctx_cmdc_clk"),
+ CLK_MSR_ID(128, "o_tohdmitx_bclk"),
+ CLK_MSR_ID(129, "o_tohdmitx_mclk"),
+ CLK_MSR_ID(130, "o_tohdmitx_spdif_clk"),
+ CLK_MSR_ID(131, "o_toacodec_bclk"),
+ CLK_MSR_ID(132, "o_toacodec_mclk"),
+ CLK_MSR_ID(133, "o_spdifout_b_mst_clk"),
+ CLK_MSR_ID(134, "o_spdifout_mst_clk"),
+ CLK_MSR_ID(135, "o_spdifin_mst_clk"),
+ CLK_MSR_ID(136, "o_audio_mclk"),
+ CLK_MSR_ID(137, "o_vad_clk"),
+ CLK_MSR_ID(138, "o_tdmout_d_sclk"),
+ CLK_MSR_ID(139, "o_tdmout_c_sclk"),
+ CLK_MSR_ID(140, "o_tdmout_b_sclk"),
+ CLK_MSR_ID(141, "o_tdmout_a_sclk"),
+ CLK_MSR_ID(142, "o_tdminb_1b_sclk"),
+ CLK_MSR_ID(143, "o_tdmin_1b_sclk"),
+ CLK_MSR_ID(144, "o_tdmin_d_sclk"),
+ CLK_MSR_ID(145, "o_tdmin_c_sclk"),
+ CLK_MSR_ID(146, "o_tdmin_b_sclk"),
+ CLK_MSR_ID(147, "o_tdmin_a_sclk"),
+ CLK_MSR_ID(148, "o_resampleb_clk"),
+ CLK_MSR_ID(149, "o_resamplea_clk"),
+ CLK_MSR_ID(150, "o_pdmb_sysclk"),
+ CLK_MSR_ID(151, "o_pdmb_dclk"),
+ CLK_MSR_ID(152, "o_pdm_sysclk"),
+ CLK_MSR_ID(153, "o_pdm_dclk"),
+ CLK_MSR_ID(154, "c_alockerb_out_clk"),
+ CLK_MSR_ID(155, "c_alockerb_in_clk"),
+ CLK_MSR_ID(156, "c_alocker_out_clk"),
+ CLK_MSR_ID(157, "c_alocker_in_clk"),
+ CLK_MSR_ID(158, "audio_mst_clk[34]"),
+ CLK_MSR_ID(159, "audio_mst_clk[35]"),
+ CLK_MSR_ID(160, "pwm_n"),
+ CLK_MSR_ID(161, "pwm_m"),
+ CLK_MSR_ID(162, "pwm_l"),
+ CLK_MSR_ID(163, "pwm_k"),
+ CLK_MSR_ID(164, "pwm_j"),
+ CLK_MSR_ID(165, "pwm_i"),
+ CLK_MSR_ID(166, "pwm_h"),
+ CLK_MSR_ID(167, "pwm_g"),
+ CLK_MSR_ID(168, "pwm_f"),
+ CLK_MSR_ID(169, "pwm_e"),
+ CLK_MSR_ID(170, "pwm_d"),
+ CLK_MSR_ID(171, "pwm_c"),
+ CLK_MSR_ID(172, "pwm_b"),
+ CLK_MSR_ID(173, "pwm_a"),
+ CLK_MSR_ID(174, "AU_DAC1_CLK_TO_GPIO"),
+ CLK_MSR_ID(175, "AU_ADC_CLK_TO_GPIO"),
+ CLK_MSR_ID(176, "rng_ring_osc_clk[0]"),
+ CLK_MSR_ID(177, "rng_ring_osc_clk[1]"),
+ CLK_MSR_ID(178, "rng_ring_osc_clk[2]"),
+ CLK_MSR_ID(179, "rng_ring_osc_clk[3]"),
+ CLK_MSR_ID(180, "sys_cpu_ring_osc_clk[0]"),
+ CLK_MSR_ID(181, "sys_cpu_ring_osc_clk[1]"),
+ CLK_MSR_ID(182, "sys_cpu_ring_osc_clk[2]"),
+ CLK_MSR_ID(183, "sys_cpu_ring_osc_clk[3]"),
+ CLK_MSR_ID(184, "sys_cpu_ring_osc_clk[4]"),
+ CLK_MSR_ID(185, "sys_cpu_ring_osc_clk[5]"),
+ CLK_MSR_ID(186, "sys_cpu_ring_osc_clk[6]"),
+ CLK_MSR_ID(187, "sys_cpu_ring_osc_clk[7]"),
+ CLK_MSR_ID(188, "sys_cpu_ring_osc_clk[8]"),
+ CLK_MSR_ID(189, "sys_cpu_ring_osc_clk[9]"),
+ CLK_MSR_ID(190, "sys_cpu_ring_osc_clk[10]"),
+ CLK_MSR_ID(191, "sys_cpu_ring_osc_clk[11]"),
+ CLK_MSR_ID(192, "am_ring_osc_clk_out[12](dmc)"),
+ CLK_MSR_ID(193, "am_ring_osc_clk_out[13](rama)"),
+ CLK_MSR_ID(194, "am_ring_osc_clk_out[14](nna)"),
+ CLK_MSR_ID(195, "am_ring_osc_clk_out[15](nna)"),
+ CLK_MSR_ID(200, "rng_ring_osc_clk_1[0]"),
+ CLK_MSR_ID(201, "rng_ring_osc_clk_1[1]"),
+ CLK_MSR_ID(202, "rng_ring_osc_clk_1[2]"),
+ CLK_MSR_ID(203, "rng_ring_osc_clk_1[3]"),
+};
+
+static const struct meson_msr_id clk_msr_s4[] = {
+ CLK_MSR_ID(0, "sys_clk"),
+ CLK_MSR_ID(1, "axi_clk"),
+ CLK_MSR_ID(2, "rtc_clk"),
+ CLK_MSR_ID(5, "mali"),
+ CLK_MSR_ID(6, "cpu_clk_div16"),
+ CLK_MSR_ID(7, "ceca_clk"),
+ CLK_MSR_ID(8, "cecb_clk"),
+ CLK_MSR_ID(10, "fclk_div5"),
+ CLK_MSR_ID(11, "mpll0"),
+ CLK_MSR_ID(12, "mpll1"),
+ CLK_MSR_ID(13, "mpll2"),
+ CLK_MSR_ID(14, "mpll3"),
+ CLK_MSR_ID(15, "fclk_50m"),
+ CLK_MSR_ID(16, "pcie_clk_inp"),
+ CLK_MSR_ID(17, "pcie_clk_inn"),
+ CLK_MSR_ID(18, "mpll_clk_test_out"),
+ CLK_MSR_ID(19, "hifi_pll"),
+ CLK_MSR_ID(20, "gp0_pll"),
+ CLK_MSR_ID(21, "gp1_pll"),
+ CLK_MSR_ID(22, "eth_mppll_50m_ckout"),
+ CLK_MSR_ID(23, "sys_pll_div16"),
+ CLK_MSR_ID(24, "ddr_dpll_pt_clk"),
+ CLK_MSR_ID(30, "mod_eth_phy_ref_clk"),
+ CLK_MSR_ID(31, "mod_eth_tx_clk"),
+ CLK_MSR_ID(32, "eth_125m"),
+ CLK_MSR_ID(33, "eth_rmii"),
+ CLK_MSR_ID(34, "co_clkin_to_mac"),
+ CLK_MSR_ID(35, "mod_eth_rx_clk_rmii"),
+ CLK_MSR_ID(36, "co_rx_clk"),
+ CLK_MSR_ID(37, "co_tx_clk"),
+ CLK_MSR_ID(38, "eth_phy_rxclk"),
+ CLK_MSR_ID(39, "eth_phy_plltxclk"),
+ CLK_MSR_ID(40, "ephy_test_clk"),
+ CLK_MSR_ID(50, "vid_pll_div_clk_out"),
+ CLK_MSR_ID(51, "enci"),
+ CLK_MSR_ID(52, "encp"),
+ CLK_MSR_ID(53, "encl"),
+ CLK_MSR_ID(54, "vdac"),
+ CLK_MSR_ID(55, "cdac_clk_c"),
+ CLK_MSR_ID(56, "mod_tcon_clko"),
+ CLK_MSR_ID(57, "lcd_an_clk_ph2"),
+ CLK_MSR_ID(58, "lcd_an_clk_ph3"),
+ CLK_MSR_ID(59, "hdmitx_pixel"),
+ CLK_MSR_ID(60, "vdin_meas"),
+ CLK_MSR_ID(61, "vpu"),
+ CLK_MSR_ID(62, "vpu_clkb"),
+ CLK_MSR_ID(63, "vpu_clkb_tmp"),
+ CLK_MSR_ID(64, "vpu_clkc"),
+ CLK_MSR_ID(65, "vid_lock"),
+ CLK_MSR_ID(66, "vapb"),
+ CLK_MSR_ID(67, "ge2d"),
+ CLK_MSR_ID(68, "cts_hdcp22_esmclk"),
+ CLK_MSR_ID(69, "cts_hdcp22_skpclk"),
+ CLK_MSR_ID(76, "hdmitx_tmds"),
+ CLK_MSR_ID(77, "hdmitx_sys_clk"),
+ CLK_MSR_ID(78, "hdmitx_fe_clk"),
+ CLK_MSR_ID(79, "rama"),
+ CLK_MSR_ID(93, "vdec"),
+ CLK_MSR_ID(99, "hevcf"),
+ CLK_MSR_ID(100, "demod_core"),
+ CLK_MSR_ID(101, "adc_extclk_in"),
+ CLK_MSR_ID(102, "cts_demod_core_t2_clk"),
+ CLK_MSR_ID(103, "adc_dpll_intclk"),
+ CLK_MSR_ID(104, "adc_dpll_clk_b3"),
+ CLK_MSR_ID(105, "s2_adc_clk"),
+ CLK_MSR_ID(106, "deskew_pll_clk_div32_out"),
+ CLK_MSR_ID(110, "sc"),
+ CLK_MSR_ID(111, "sar_adc"),
+ CLK_MSR_ID(113, "sd_emmc_c"),
+ CLK_MSR_ID(114, "sd_emmc_b"),
+ CLK_MSR_ID(115, "sd_emmc_a"),
+ CLK_MSR_ID(116, "gpio_msr_clk"),
+ CLK_MSR_ID(118, "spicc0"),
+ CLK_MSR_ID(121, "ts"),
+ CLK_MSR_ID(130, "audio_vad_clk"),
+ CLK_MSR_ID(131, "acodec_dac_clk_x128"),
+ CLK_MSR_ID(132, "audio_locker_in_clk"),
+ CLK_MSR_ID(133, "audio_locker_out_clk"),
+ CLK_MSR_ID(134, "audio_tdmout_c_sclk"),
+ CLK_MSR_ID(135, "audio_tdmout_b_sclk"),
+ CLK_MSR_ID(136, "audio_tdmout_a_sclk"),
+ CLK_MSR_ID(137, "audio_tdmin_lb_sclk"),
+ CLK_MSR_ID(138, "audio_tdmin_c_sclk"),
+ CLK_MSR_ID(139, "audio_tdmin_b_sclk"),
+ CLK_MSR_ID(140, "audio_tdmin_a_sclk"),
+ CLK_MSR_ID(141, "audio_resamplea_clk"),
+ CLK_MSR_ID(142, "audio_pdm_sysclk"),
+ CLK_MSR_ID(143, "audio_spdifout_b_mst_clk"),
+ CLK_MSR_ID(144, "audio_spdifout_mst_clk"),
+ CLK_MSR_ID(145, "audio_spdifin_mst_clk"),
+ CLK_MSR_ID(146, "audio_pdm_dclk"),
+ CLK_MSR_ID(147, "audio_resampleb_clk"),
+ CLK_MSR_ID(160, "pwm_j"),
+ CLK_MSR_ID(161, "pwm_i"),
+ CLK_MSR_ID(162, "pwm_h"),
+ CLK_MSR_ID(163, "pwm_g"),
+ CLK_MSR_ID(164, "pwm_f"),
+ CLK_MSR_ID(165, "pwm_e"),
+ CLK_MSR_ID(166, "pwm_d"),
+ CLK_MSR_ID(167, "pwm_c"),
+ CLK_MSR_ID(168, "pwm_b"),
+ CLK_MSR_ID(169, "pwm_a"),
+ CLK_MSR_ID(176, "rng_ring_0"),
+ CLK_MSR_ID(177, "rng_ring_1"),
+ CLK_MSR_ID(178, "rng_ring_2"),
+ CLK_MSR_ID(179, "rng_ring_3"),
+ CLK_MSR_ID(180, "dmc_osc_ring(LVT16)"),
+ CLK_MSR_ID(181, "gpu_osc_ring0(LVT16)"),
+ CLK_MSR_ID(182, "gpu_osc_ring1(ULVT16)"),
+ CLK_MSR_ID(183, "gpu_osc_ring2(SLVT16)"),
+ CLK_MSR_ID(184, "vpu_osc_ring0(SVT24)"),
+ CLK_MSR_ID(185, "vpu_osc_ring1(LVT20)"),
+ CLK_MSR_ID(186, "vpu_osc_ring2(LVT16)"),
+ CLK_MSR_ID(187, "dos_osc_ring0(SVT24)"),
+ CLK_MSR_ID(188, "dos_osc_ring1(SVT16)"),
+ CLK_MSR_ID(189, "dos_osc_ring2(LVT16)"),
+ CLK_MSR_ID(190, "dos_osc_ring3(ULVT20)"),
+ CLK_MSR_ID(192, "axi_sram_osc_ring(SVT16)"),
+ CLK_MSR_ID(193, "demod_osc_ring0"),
+ CLK_MSR_ID(194, "demod_osc_ring1"),
+ CLK_MSR_ID(195, "sar_osc_ring"),
+ CLK_MSR_ID(196, "sys_cpu_osc_ring0"),
+ CLK_MSR_ID(197, "sys_cpu_osc_ring1"),
+ CLK_MSR_ID(198, "sys_cpu_osc_ring2"),
+ CLK_MSR_ID(199, "sys_cpu_osc_ring3"),
+ CLK_MSR_ID(200, "sys_cpu_osc_ring4"),
+ CLK_MSR_ID(201, "sys_cpu_osc_ring5"),
+ CLK_MSR_ID(202, "sys_cpu_osc_ring6"),
+ CLK_MSR_ID(203, "sys_cpu_osc_ring7"),
+ CLK_MSR_ID(204, "sys_cpu_osc_ring8"),
+ CLK_MSR_ID(205, "sys_cpu_osc_ring9"),
+ CLK_MSR_ID(206, "sys_cpu_osc_ring10"),
+ CLK_MSR_ID(207, "sys_cpu_osc_ring11"),
+ CLK_MSR_ID(208, "sys_cpu_osc_ring12"),
+ CLK_MSR_ID(209, "sys_cpu_osc_ring13"),
+ CLK_MSR_ID(210, "sys_cpu_osc_ring14"),
+ CLK_MSR_ID(211, "sys_cpu_osc_ring15"),
+ CLK_MSR_ID(212, "sys_cpu_osc_ring16"),
+ CLK_MSR_ID(213, "sys_cpu_osc_ring17"),
+ CLK_MSR_ID(214, "sys_cpu_osc_ring18"),
+ CLK_MSR_ID(215, "sys_cpu_osc_ring19"),
+ CLK_MSR_ID(216, "sys_cpu_osc_ring20"),
+ CLK_MSR_ID(217, "sys_cpu_osc_ring21"),
+ CLK_MSR_ID(218, "sys_cpu_osc_ring22"),
+ CLK_MSR_ID(219, "sys_cpu_osc_ring23"),
+ CLK_MSR_ID(220, "sys_cpu_osc_ring24"),
+ CLK_MSR_ID(221, "sys_cpu_osc_ring25"),
+ CLK_MSR_ID(222, "sys_cpu_osc_ring26"),
+ CLK_MSR_ID(223, "sys_cpu_osc_ring27"),
+};
+
static int meson_measure_id(struct meson_msr_id *clk_msr_id,
- unsigned int duration)
+ unsigned int duration)
{
struct meson_msr *priv = clk_msr_id->priv;
+ const struct msr_reg_offset *reg = priv->data.reg;
unsigned int val;
int ret;
@@ -499,22 +799,22 @@ static int meson_measure_id(struct meson_msr_id *clk_msr_id,
if (ret)
return ret;
- regmap_write(priv->regmap, MSR_CLK_REG0, 0);
+ regmap_write(priv->regmap, reg->freq_ctrl, 0);
/* Set measurement duration */
- regmap_update_bits(priv->regmap, MSR_CLK_REG0, MSR_DURATION,
+ regmap_update_bits(priv->regmap, reg->freq_ctrl, MSR_DURATION,
FIELD_PREP(MSR_DURATION, duration - 1));
/* Set ID */
- regmap_update_bits(priv->regmap, MSR_CLK_REG0, MSR_CLK_SRC,
+ regmap_update_bits(priv->regmap, reg->freq_ctrl, MSR_CLK_SRC,
FIELD_PREP(MSR_CLK_SRC, clk_msr_id->id));
/* Enable & Start */
- regmap_update_bits(priv->regmap, MSR_CLK_REG0,
+ regmap_update_bits(priv->regmap, reg->freq_ctrl,
MSR_RUN | MSR_ENABLE,
MSR_RUN | MSR_ENABLE);
- ret = regmap_read_poll_timeout(priv->regmap, MSR_CLK_REG0,
+ ret = regmap_read_poll_timeout(priv->regmap, reg->freq_ctrl,
val, !(val & MSR_BUSY), 10, 10000);
if (ret) {
mutex_unlock(&measure_lock);
@@ -522,10 +822,10 @@ static int meson_measure_id(struct meson_msr_id *clk_msr_id,
}
/* Disable */
- regmap_update_bits(priv->regmap, MSR_CLK_REG0, MSR_ENABLE, 0);
+ regmap_update_bits(priv->regmap, reg->freq_ctrl, MSR_ENABLE, 0);
/* Get the value in multiple of gate time counts */
- regmap_read(priv->regmap, MSR_CLK_REG2, &val);
+ regmap_read(priv->regmap, reg->freq_val, &val);
mutex_unlock(&measure_lock);
@@ -573,13 +873,14 @@ DEFINE_SHOW_ATTRIBUTE(clk_msr);
static int clk_msr_summary_show(struct seq_file *s, void *data)
{
struct meson_msr_id *msr_table = s->private;
+ unsigned int msr_count = msr_table->priv->data.msr_count;
unsigned int precision = 0;
int val, i;
seq_puts(s, " clock rate precision\n");
seq_puts(s, "---------------------------------------------\n");
- for (i = 0 ; i < CLK_MSR_MAX ; ++i) {
+ for (i = 0 ; i < msr_count ; ++i) {
if (!msr_table[i].name)
continue;
@@ -595,18 +896,18 @@ static int clk_msr_summary_show(struct seq_file *s, void *data)
}
DEFINE_SHOW_ATTRIBUTE(clk_msr_summary);
-static const struct regmap_config meson_clk_msr_regmap_config = {
+static struct regmap_config meson_clk_msr_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
- .max_register = MSR_CLK_REG2,
};
static int meson_msr_probe(struct platform_device *pdev)
{
- const struct meson_msr_id *match_data;
+ const struct meson_msr_data *match_data;
struct meson_msr *priv;
struct dentry *root, *clks;
+ struct resource *res;
void __iomem *base;
int i;
@@ -621,60 +922,142 @@ static int meson_msr_probe(struct platform_device *pdev)
return -ENODEV;
}
- memcpy(priv->msr_table, match_data, sizeof(priv->msr_table));
+ priv->data.msr_table = devm_kcalloc(&pdev->dev,
+ match_data->msr_count,
+ sizeof(struct meson_msr_id),
+ GFP_KERNEL);
+ if (!priv->data.msr_table)
+ return -ENOMEM;
- base = devm_platform_ioremap_resource(pdev, 0);
+ memcpy(priv->data.msr_table, match_data->msr_table,
+ match_data->msr_count * sizeof(struct meson_msr_id));
+ priv->data.msr_count = match_data->msr_count;
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
+ meson_clk_msr_regmap_config.max_register = resource_size(res) - 4;
priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
&meson_clk_msr_regmap_config);
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
+ priv->data.reg = devm_kzalloc(&pdev->dev, sizeof(struct msr_reg_offset),
+ GFP_KERNEL);
+ if (!priv->data.reg)
+ return -ENOMEM;
+
+ memcpy((void *)priv->data.reg, match_data->reg,
+ sizeof(struct msr_reg_offset));
+
root = debugfs_create_dir("meson-clk-msr", NULL);
clks = debugfs_create_dir("clks", root);
debugfs_create_file("measure_summary", 0444, root,
- priv->msr_table, &clk_msr_summary_fops);
+ priv->data.msr_table, &clk_msr_summary_fops);
- for (i = 0 ; i < CLK_MSR_MAX ; ++i) {
- if (!priv->msr_table[i].name)
+ for (i = 0 ; i < priv->data.msr_count ; ++i) {
+ if (!priv->data.msr_table[i].name)
continue;
- priv->msr_table[i].priv = priv;
+ priv->data.msr_table[i].priv = priv;
- debugfs_create_file(priv->msr_table[i].name, 0444, clks,
- &priv->msr_table[i], &clk_msr_fops);
+ debugfs_create_file(priv->data.msr_table[i].name, 0444, clks,
+ &priv->data.msr_table[i], &clk_msr_fops);
}
return 0;
}
+static const struct msr_reg_offset msr_reg_offset = {
+ .duty_val = 0x0,
+ .freq_ctrl = 0x4,
+ .duty_ctrl = 0x8,
+ .freq_val = 0xc,
+};
+
+static const struct meson_msr_data clk_msr_gx_data = {
+ .msr_table = (void *)clk_msr_gx,
+ .msr_count = ARRAY_SIZE(clk_msr_gx),
+ .reg = &msr_reg_offset,
+};
+
+static const struct meson_msr_data clk_msr_m8_data = {
+ .msr_table = (void *)clk_msr_m8,
+ .msr_count = ARRAY_SIZE(clk_msr_m8),
+ .reg = &msr_reg_offset,
+};
+
+static const struct meson_msr_data clk_msr_axg_data = {
+ .msr_table = (void *)clk_msr_axg,
+ .msr_count = ARRAY_SIZE(clk_msr_axg),
+ .reg = &msr_reg_offset,
+};
+
+static const struct meson_msr_data clk_msr_g12a_data = {
+ .msr_table = (void *)clk_msr_g12a,
+ .msr_count = ARRAY_SIZE(clk_msr_g12a),
+ .reg = &msr_reg_offset,
+};
+
+static const struct meson_msr_data clk_msr_sm1_data = {
+ .msr_table = (void *)clk_msr_sm1,
+ .msr_count = ARRAY_SIZE(clk_msr_sm1),
+ .reg = &msr_reg_offset,
+};
+
+static const struct msr_reg_offset msr_reg_offset_v2 = {
+ .freq_ctrl = 0x0,
+ .duty_ctrl = 0x4,
+ .freq_val = 0x8,
+ .duty_val = 0x18,
+};
+
+static const struct meson_msr_data clk_msr_c3_data = {
+ .msr_table = (void *)clk_msr_c3,
+ .msr_count = ARRAY_SIZE(clk_msr_c3),
+ .reg = &msr_reg_offset_v2,
+};
+
+static const struct meson_msr_data clk_msr_s4_data = {
+ .msr_table = (void *)clk_msr_s4,
+ .msr_count = ARRAY_SIZE(clk_msr_s4),
+ .reg = &msr_reg_offset_v2,
+};
+
static const struct of_device_id meson_msr_match_table[] = {
{
.compatible = "amlogic,meson-gx-clk-measure",
- .data = (void *)clk_msr_gx,
+ .data = &clk_msr_gx_data,
},
{
.compatible = "amlogic,meson8-clk-measure",
- .data = (void *)clk_msr_m8,
+ .data = &clk_msr_m8_data,
},
{
.compatible = "amlogic,meson8b-clk-measure",
- .data = (void *)clk_msr_m8,
+ .data = &clk_msr_m8_data,
},
{
.compatible = "amlogic,meson-axg-clk-measure",
- .data = (void *)clk_msr_axg,
+ .data = &clk_msr_axg_data,
},
{
.compatible = "amlogic,meson-g12a-clk-measure",
- .data = (void *)clk_msr_g12a,
+ .data = &clk_msr_g12a_data,
},
{
.compatible = "amlogic,meson-sm1-clk-measure",
- .data = (void *)clk_msr_sm1,
+ .data = &clk_msr_sm1_data,
+ },
+ {
+ .compatible = "amlogic,c3-clk-measure",
+ .data = &clk_msr_c3_data,
+ },
+ {
+ .compatible = "amlogic,s4-clk-measure",
+ .data = &clk_msr_s4_data,
},
{ /* sentinel */ }
};
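With the register layout carried in match data, supporting a further SoC generation reduces to supplying a clock table, its size and an offset struct; the measurement sequence in meson_measure_id() is shared. A sketch for a hypothetical new entry (names invented for illustration):

static const struct meson_msr_id clk_msr_newsoc[] = {
	CLK_MSR_ID(0, "sys_clk"),
	/* ... remaining measurable clocks ... */
};

static const struct meson_msr_data clk_msr_newsoc_data = {
	.msr_table = (void *)clk_msr_newsoc,
	.msr_count = ARRAY_SIZE(clk_msr_newsoc),
	.reg = &msr_reg_offset_v2,	/* v2 layout: freq_ctrl at 0x0 */
};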
diff --git a/drivers/soc/aspeed/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c
index 9ab5ba9cf1d6..ef8f355589a5 100644
--- a/drivers/soc/aspeed/aspeed-lpc-snoop.c
+++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c
@@ -166,7 +166,7 @@ static int aspeed_lpc_snoop_config_irq(struct aspeed_lpc_snoop *lpc_snoop,
int rc;
lpc_snoop->irq = platform_get_irq(pdev, 0);
- if (!lpc_snoop->irq)
+ if (lpc_snoop->irq < 0)
return -ENODEV;
rc = devm_request_irq(dev, lpc_snoop->irq,
@@ -200,11 +200,15 @@ static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
lpc_snoop->chan[channel].miscdev.minor = MISC_DYNAMIC_MINOR;
lpc_snoop->chan[channel].miscdev.name =
devm_kasprintf(dev, GFP_KERNEL, "%s%d", DEVICE_NAME, channel);
+ if (!lpc_snoop->chan[channel].miscdev.name) {
+ rc = -ENOMEM;
+ goto err_free_fifo;
+ }
lpc_snoop->chan[channel].miscdev.fops = &snoop_fops;
lpc_snoop->chan[channel].miscdev.parent = dev;
rc = misc_register(&lpc_snoop->chan[channel].miscdev);
if (rc)
- return rc;
+ goto err_free_fifo;
/* Enable LPC snoop channel at requested port */
switch (channel) {
@@ -221,7 +225,8 @@ static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
hicrb_en = HICRB_ENSNP1D;
break;
default:
- return -EINVAL;
+ rc = -EINVAL;
+ goto err_misc_deregister;
}
regmap_update_bits(lpc_snoop->regmap, HICR5, hicr5_en, hicr5_en);
@@ -231,6 +236,12 @@ static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
regmap_update_bits(lpc_snoop->regmap, HICRB,
hicrb_en, hicrb_en);
+ return 0;
+
+err_misc_deregister:
+ misc_deregister(&lpc_snoop->chan[channel].miscdev);
+err_free_fifo:
+ kfifo_free(&lpc_snoop->chan[channel].fifo);
return rc;
}
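The pattern above is the usual goto-based unwind: each failure point jumps to a label that releases exactly what was set up before it, in reverse order. A compact sketch of the same shape (the struct and the enable helper are hypothetical):

#include <linux/kfifo.h>
#include <linux/miscdevice.h>

struct example_chan {
	struct kfifo fifo;
	struct miscdevice miscdev;
};

static int example_chan_setup(struct example_chan *chan)
{
	int rc;

	rc = kfifo_alloc(&chan->fifo, 2048, GFP_KERNEL);
	if (rc)
		return rc;

	rc = misc_register(&chan->miscdev);
	if (rc)
		goto err_free_fifo;

	rc = example_enable_hw(chan);	/* hypothetical hardware enable */
	if (rc)
		goto err_misc_deregister;

	return 0;

err_misc_deregister:
	misc_deregister(&chan->miscdev);
err_free_fifo:
	kfifo_free(&chan->fifo);
	return rc;
}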
diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig
index a1e0bc8c1757..47870e29c290 100644
--- a/drivers/soc/fsl/Kconfig
+++ b/drivers/soc/fsl/Kconfig
@@ -36,7 +36,7 @@ config FSL_MC_DPIO
config DPAA2_CONSOLE
tristate "QorIQ DPAA2 console driver"
depends on OF && (ARCH_LAYERSCAPE || COMPILE_TEST)
- default y
+ default ARCH_LAYERSCAPE
help
Console driver for DPAA2 platforms. Exports 2 char devices,
/dev/dpaa2_mc_console and /dev/dpaa2_aiop_console,
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 4dc8aba33d9b..9be240999f87 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -1270,7 +1270,7 @@ static int qman_create_portal(struct qman_portal *portal,
qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
- portal->cgrs = kmalloc_array(2, sizeof(*cgrs), GFP_KERNEL);
+ portal->cgrs = kmalloc_array(2, sizeof(*portal->cgrs), GFP_KERNEL);
if (!portal->cgrs)
goto fail_cgrs;
/* initial snapshot is no-depletion */
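The fix is the standard sizeof(*ptr) allocation idiom: sizing the allocation from the destination pointer keeps the element size correct even if the pointer's type later changes, whereas naming another variable can drift silently:

/* Preferred: element size follows the destination pointer */
portal->cgrs = kmalloc_array(2, sizeof(*portal->cgrs), GFP_KERNEL);

/* Fragile: only correct while 'cgrs' and 'portal->cgrs' share a type */
portal->cgrs = kmalloc_array(2, sizeof(*cgrs), GFP_KERNEL);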
diff --git a/drivers/soc/fsl/qe/qe_ic.c b/drivers/soc/fsl/qe/qe_ic.c
index e4b6ff2cc76b..4068b501a3a3 100644
--- a/drivers/soc/fsl/qe/qe_ic.c
+++ b/drivers/soc/fsl/qe/qe_ic.c
@@ -232,11 +232,6 @@ static inline void qe_ic_write(__be32 __iomem *base, unsigned int reg,
iowrite32be(value, base + (reg >> 2));
}
-static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
-{
- return irq_get_chip_data(virq);
-}
-
static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d)
{
return irq_data_get_irq_chip_data(d);
@@ -455,13 +450,11 @@ static int qe_ic_init(struct platform_device *pdev)
qe_ic_write(qe_ic->regs, QEIC_CICR, 0);
- irq_set_handler_data(qe_ic->virq_low, qe_ic);
- irq_set_chained_handler(qe_ic->virq_low, low_handler);
+ irq_set_chained_handler_and_data(qe_ic->virq_low, low_handler, qe_ic);
- if (high_handler) {
- irq_set_handler_data(qe_ic->virq_high, qe_ic);
- irq_set_chained_handler(qe_ic->virq_high, high_handler);
- }
+ if (high_handler)
+ irq_set_chained_handler_and_data(qe_ic->virq_high,
+ high_handler, qe_ic);
return 0;
}
static const struct of_device_id qe_ic_ids[] = {
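irq_set_chained_handler_and_data() installs the handler and its data in one call under the IRQ descriptor lock, so the chained handler can never observe half-initialized chip data. The before/after shape, as a sketch:

/* Before: two steps, with a window between data and handler installation */
irq_set_handler_data(virq, qe_ic);
irq_set_chained_handler(virq, handler);

/* After: both are installed together */
irq_set_chained_handler_and_data(virq, handler, qe_ic);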
diff --git a/drivers/soc/hisilicon/kunpeng_hccs.c b/drivers/soc/hisilicon/kunpeng_hccs.c
index 444a8f59b7da..7fc353732d55 100644
--- a/drivers/soc/hisilicon/kunpeng_hccs.c
+++ b/drivers/soc/hisilicon/kunpeng_hccs.c
@@ -167,10 +167,6 @@ static void hccs_pcc_rx_callback(struct mbox_client *cl, void *mssg)
static void hccs_unregister_pcc_channel(struct hccs_dev *hdev)
{
- struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
-
- if (cl_info->pcc_comm_addr)
- iounmap(cl_info->pcc_comm_addr);
pcc_mbox_free_channel(hdev->cl_info.pcc_chan);
}
@@ -179,6 +175,7 @@ static int hccs_register_pcc_channel(struct hccs_dev *hdev)
struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
struct mbox_client *cl = &cl_info->client;
struct pcc_mbox_chan *pcc_chan;
+ struct mbox_chan *mbox_chan;
struct device *dev = hdev->dev;
int rc;
@@ -196,7 +193,7 @@ static int hccs_register_pcc_channel(struct hccs_dev *hdev)
goto out;
}
cl_info->pcc_chan = pcc_chan;
- cl_info->mbox_chan = pcc_chan->mchan;
+ mbox_chan = pcc_chan->mchan;
/*
* pcc_chan->latency is just a nominal value. In reality the remote
@@ -206,34 +203,24 @@ static int hccs_register_pcc_channel(struct hccs_dev *hdev)
cl_info->deadline_us =
HCCS_PCC_CMD_WAIT_RETRIES_NUM * pcc_chan->latency;
if (!hdev->verspec_data->has_txdone_irq &&
- cl_info->mbox_chan->mbox->txdone_irq) {
+ mbox_chan->mbox->txdone_irq) {
dev_err(dev, "PCC IRQ in PCCT is enabled.\n");
rc = -EINVAL;
goto err_mbx_channel_free;
} else if (hdev->verspec_data->has_txdone_irq &&
- !cl_info->mbox_chan->mbox->txdone_irq) {
+ !mbox_chan->mbox->txdone_irq) {
dev_err(dev, "PCC IRQ in PCCT isn't supported.\n");
rc = -EINVAL;
goto err_mbx_channel_free;
}
- if (!pcc_chan->shmem_base_addr ||
- pcc_chan->shmem_size != HCCS_PCC_SHARE_MEM_BYTES) {
- dev_err(dev, "The base address or size (%llu) of PCC communication region is invalid.\n",
- pcc_chan->shmem_size);
+ if (pcc_chan->shmem_size != HCCS_PCC_SHARE_MEM_BYTES) {
+ dev_err(dev, "Size (%llu) of PCC communication region must be %d bytes.\n",
+ pcc_chan->shmem_size, HCCS_PCC_SHARE_MEM_BYTES);
rc = -EINVAL;
goto err_mbx_channel_free;
}
- cl_info->pcc_comm_addr = ioremap(pcc_chan->shmem_base_addr,
- pcc_chan->shmem_size);
- if (!cl_info->pcc_comm_addr) {
- dev_err(dev, "Failed to ioremap PCC communication region for channel-%u.\n",
- hdev->chan_id);
- rc = -ENOMEM;
- goto err_mbx_channel_free;
- }
-
return 0;
err_mbx_channel_free:
@@ -246,7 +233,7 @@ static int hccs_wait_cmd_complete_by_poll(struct hccs_dev *hdev)
{
struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
struct acpi_pcct_shared_memory __iomem *comm_base =
- cl_info->pcc_comm_addr;
+ cl_info->pcc_chan->shmem;
u16 status;
int ret;
@@ -289,7 +276,7 @@ static inline void hccs_fill_pcc_shared_mem_region(struct hccs_dev *hdev,
.status = 0,
};
- memcpy_toio(hdev->cl_info.pcc_comm_addr, (void *)&tmp,
+ memcpy_toio(hdev->cl_info.pcc_chan->shmem, (void *)&tmp,
sizeof(struct acpi_pcct_shared_memory));
/* Copy the message to the PCC comm space */
@@ -309,7 +296,7 @@ static inline void hccs_fill_ext_pcc_shared_mem_region(struct hccs_dev *hdev,
.command = cmd,
};
- memcpy_toio(hdev->cl_info.pcc_comm_addr, (void *)&tmp,
+ memcpy_toio(hdev->cl_info.pcc_chan->shmem, (void *)&tmp,
sizeof(struct acpi_pcct_ext_pcc_shared_memory));
/* Copy the message to the PCC comm space */
@@ -321,12 +308,13 @@ static int hccs_pcc_cmd_send(struct hccs_dev *hdev, u8 cmd,
{
const struct hccs_verspecific_data *verspec_data = hdev->verspec_data;
struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
+ struct mbox_chan *mbox_chan = cl_info->pcc_chan->mchan;
struct hccs_fw_inner_head *fw_inner_head;
void __iomem *comm_space;
u16 space_size;
int ret;
- comm_space = cl_info->pcc_comm_addr + verspec_data->shared_mem_size;
+ comm_space = cl_info->pcc_chan->shmem + verspec_data->shared_mem_size;
space_size = HCCS_PCC_SHARE_MEM_BYTES - verspec_data->shared_mem_size;
verspec_data->fill_pcc_shared_mem(hdev, cmd, desc,
comm_space, space_size);
@@ -334,7 +322,7 @@ static int hccs_pcc_cmd_send(struct hccs_dev *hdev, u8 cmd,
reinit_completion(&cl_info->done);
/* Ring doorbell */
- ret = mbox_send_message(cl_info->mbox_chan, &cmd);
+ ret = mbox_send_message(mbox_chan, &cmd);
if (ret < 0) {
dev_err(hdev->dev, "Send PCC mbox message failed, ret = %d.\n",
ret);
@@ -356,9 +344,9 @@ static int hccs_pcc_cmd_send(struct hccs_dev *hdev, u8 cmd,
end:
if (verspec_data->has_txdone_irq)
- mbox_chan_txdone(cl_info->mbox_chan, ret);
+ mbox_chan_txdone(mbox_chan, ret);
else
- mbox_client_txdone(cl_info->mbox_chan, ret);
+ mbox_client_txdone(mbox_chan, ret);
return ret;
}
diff --git a/drivers/soc/hisilicon/kunpeng_hccs.h b/drivers/soc/hisilicon/kunpeng_hccs.h
index dc267136919b..f0a9a5618d97 100644
--- a/drivers/soc/hisilicon/kunpeng_hccs.h
+++ b/drivers/soc/hisilicon/kunpeng_hccs.h
@@ -60,10 +60,8 @@ struct hccs_chip_info {
struct hccs_mbox_client_info {
struct mbox_client client;
- struct mbox_chan *mbox_chan;
struct pcc_mbox_chan *pcc_chan;
u64 deadline_us;
- void __iomem *pcc_comm_addr;
struct completion done;
};
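The driver now leans on the mapping the PCC mailbox core already creates for the subspace: pcc_mbox_request_channel() populates pcc_chan->shmem, so the private ioremap of shmem_base_addr (and the matching iounmap) can go. A sketch of the simplified flow, assuming a kernel where pcc_chan->shmem is provided by the PCC core:

pcc_chan = pcc_mbox_request_channel(cl, chan_id);
if (IS_ERR(pcc_chan))
	return PTR_ERR(pcc_chan);

/* No driver-side ioremap; use the core-provided mapping directly */
memcpy_toio(pcc_chan->shmem, &hdr, sizeof(hdr));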
diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c
index 3ed8161d7d28..04a1b60f2f2b 100644
--- a/drivers/soc/imx/soc-imx8m.c
+++ b/drivers/soc/imx/soc-imx8m.c
@@ -24,13 +24,21 @@
#define OCOTP_UID_HIGH 0x420
#define IMX8MP_OCOTP_UID_OFFSET 0x10
+#define IMX8MP_OCOTP_UID_HIGH 0xE00
/* Same as ANADIG_DIGPROG_IMX7D */
#define ANADIG_DIGPROG_IMX8MM 0x800
struct imx8_soc_data {
char *name;
- int (*soc_revision)(u32 *socrev, u64 *socuid);
+ const char *ocotp_compatible;
+ int (*soc_revision)(struct platform_device *pdev, u32 *socrev);
+ int (*soc_uid)(struct platform_device *pdev, u64 *socuid);
+};
+
+struct imx8_soc_drvdata {
+ void __iomem *ocotp_base;
+ struct clk *clk;
};
#ifdef CONFIG_HAVE_ARM_SMCCC
@@ -49,30 +57,24 @@ static u32 imx8mq_soc_revision_from_atf(void)
static inline u32 imx8mq_soc_revision_from_atf(void) { return 0; };
#endif
-static int imx8mq_soc_revision(u32 *socrev, u64 *socuid)
+static int imx8m_soc_uid(struct platform_device *pdev, u64 *socuid)
{
- struct device_node *np __free(device_node) =
- of_find_compatible_node(NULL, NULL, "fsl,imx8mq-ocotp");
- void __iomem *ocotp_base;
- u32 magic;
- u32 rev;
- struct clk *clk;
- int ret;
+ struct imx8_soc_drvdata *drvdata = platform_get_drvdata(pdev);
+ void __iomem *ocotp_base = drvdata->ocotp_base;
- if (!np)
- return -EINVAL;
-
- ocotp_base = of_iomap(np, 0);
- if (!ocotp_base)
- return -EINVAL;
+ *socuid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH);
+ *socuid <<= 32;
+ *socuid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW);
- clk = of_clk_get_by_name(np, NULL);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- goto err_clk;
- }
+ return 0;
+}
- clk_prepare_enable(clk);
+static int imx8mq_soc_revision(struct platform_device *pdev, u32 *socrev)
+{
+ struct imx8_soc_drvdata *drvdata = platform_get_drvdata(pdev);
+ void __iomem *ocotp_base = drvdata->ocotp_base;
+ u32 magic;
+ u32 rev;
/*
* SOC revision on older imx8mq is not available in fuses so query
@@ -85,98 +87,109 @@ static int imx8mq_soc_revision(u32 *socrev, u64 *socuid)
rev = REV_B1;
}
- *socuid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH);
- *socuid <<= 32;
- *socuid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW);
-
*socrev = rev;
- clk_disable_unprepare(clk);
- clk_put(clk);
- iounmap(ocotp_base);
-
return 0;
+}
-err_clk:
- iounmap(ocotp_base);
- return ret;
+static int imx8mp_soc_uid(struct platform_device *pdev, u64 *socuid)
+{
+ struct imx8_soc_drvdata *drvdata = platform_get_drvdata(pdev);
+ void __iomem *ocotp_base = drvdata->ocotp_base;
+
+ socuid[0] = readl_relaxed(ocotp_base + OCOTP_UID_HIGH + IMX8MP_OCOTP_UID_OFFSET);
+ socuid[0] <<= 32;
+ socuid[0] |= readl_relaxed(ocotp_base + OCOTP_UID_LOW + IMX8MP_OCOTP_UID_OFFSET);
+
+ socuid[1] = readl_relaxed(ocotp_base + IMX8MP_OCOTP_UID_HIGH + 0x10);
+ socuid[1] <<= 32;
+ socuid[1] |= readl_relaxed(ocotp_base + IMX8MP_OCOTP_UID_HIGH);
+
+ return 0;
}
-static int imx8mm_soc_uid(u64 *socuid)
+static int imx8mm_soc_revision(struct platform_device *pdev, u32 *socrev)
{
struct device_node *np __free(device_node) =
- of_find_compatible_node(NULL, NULL, "fsl,imx8mm-ocotp");
- void __iomem *ocotp_base;
- struct clk *clk;
- int ret = 0;
- u32 offset = of_machine_is_compatible("fsl,imx8mp") ?
- IMX8MP_OCOTP_UID_OFFSET : 0;
+ of_find_compatible_node(NULL, NULL, "fsl,imx8mm-anatop");
+ void __iomem *anatop_base;
if (!np)
return -EINVAL;
- ocotp_base = of_iomap(np, 0);
- if (!ocotp_base)
+ anatop_base = of_iomap(np, 0);
+ if (!anatop_base)
return -EINVAL;
- clk = of_clk_get_by_name(np, NULL);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- goto err_clk;
- }
-
- clk_prepare_enable(clk);
-
- *socuid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH + offset);
- *socuid <<= 32;
- *socuid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW + offset);
+ *socrev = readl_relaxed(anatop_base + ANADIG_DIGPROG_IMX8MM);
- clk_disable_unprepare(clk);
- clk_put(clk);
+ iounmap(anatop_base);
-err_clk:
- iounmap(ocotp_base);
- return ret;
+ return 0;
}
-static int imx8mm_soc_revision(u32 *socrev, u64 *socuid)
+static int imx8m_soc_prepare(struct platform_device *pdev, const char *ocotp_compatible)
{
struct device_node *np __free(device_node) =
- of_find_compatible_node(NULL, NULL, "fsl,imx8mm-anatop");
- void __iomem *anatop_base;
+ of_find_compatible_node(NULL, NULL, ocotp_compatible);
+ struct imx8_soc_drvdata *drvdata = platform_get_drvdata(pdev);
+ int ret = 0;
if (!np)
return -EINVAL;
- anatop_base = of_iomap(np, 0);
- if (!anatop_base)
+ drvdata->ocotp_base = of_iomap(np, 0);
+ if (!drvdata->ocotp_base)
return -EINVAL;
- *socrev = readl_relaxed(anatop_base + ANADIG_DIGPROG_IMX8MM);
+ drvdata->clk = of_clk_get_by_name(np, NULL);
+ if (IS_ERR(drvdata->clk)) {
+ ret = PTR_ERR(drvdata->clk);
+ goto err_clk;
+ }
- iounmap(anatop_base);
+ return clk_prepare_enable(drvdata->clk);
+
+err_clk:
+ iounmap(drvdata->ocotp_base);
+ return ret;
+}
+
+static void imx8m_soc_unprepare(struct platform_device *pdev)
+{
+ struct imx8_soc_drvdata *drvdata = platform_get_drvdata(pdev);
- return imx8mm_soc_uid(socuid);
+ clk_disable_unprepare(drvdata->clk);
+ clk_put(drvdata->clk);
+ iounmap(drvdata->ocotp_base);
}
static const struct imx8_soc_data imx8mq_soc_data = {
.name = "i.MX8MQ",
+ .ocotp_compatible = "fsl,imx8mq-ocotp",
.soc_revision = imx8mq_soc_revision,
+ .soc_uid = imx8m_soc_uid,
};
static const struct imx8_soc_data imx8mm_soc_data = {
.name = "i.MX8MM",
+ .ocotp_compatible = "fsl,imx8mm-ocotp",
.soc_revision = imx8mm_soc_revision,
+ .soc_uid = imx8m_soc_uid,
};
static const struct imx8_soc_data imx8mn_soc_data = {
.name = "i.MX8MN",
+ .ocotp_compatible = "fsl,imx8mm-ocotp",
.soc_revision = imx8mm_soc_revision,
+ .soc_uid = imx8m_soc_uid,
};
static const struct imx8_soc_data imx8mp_soc_data = {
.name = "i.MX8MP",
+ .ocotp_compatible = "fsl,imx8mm-ocotp",
.soc_revision = imx8mm_soc_revision,
+ .soc_uid = imx8mp_soc_uid,
};
static __maybe_unused const struct of_device_id imx8_soc_match[] = {
@@ -207,17 +220,24 @@ static int imx8m_soc_probe(struct platform_device *pdev)
struct soc_device_attribute *soc_dev_attr;
struct platform_device *cpufreq_dev;
const struct imx8_soc_data *data;
+ struct imx8_soc_drvdata *drvdata;
struct device *dev = &pdev->dev;
const struct of_device_id *id;
struct soc_device *soc_dev;
u32 soc_rev = 0;
- u64 soc_uid = 0;
+ u64 soc_uid[2] = {0, 0};
int ret;
soc_dev_attr = devm_kzalloc(dev, sizeof(*soc_dev_attr), GFP_KERNEL);
if (!soc_dev_attr)
return -ENOMEM;
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, drvdata);
+
soc_dev_attr->family = "Freescale i.MX";
ret = of_property_read_string(of_root, "model", &soc_dev_attr->machine);
@@ -231,18 +251,37 @@ static int imx8m_soc_probe(struct platform_device *pdev)
data = id->data;
if (data) {
soc_dev_attr->soc_id = data->name;
+ ret = imx8m_soc_prepare(pdev, data->ocotp_compatible);
+ if (ret)
+ return ret;
+
if (data->soc_revision) {
- ret = data->soc_revision(&soc_rev, &soc_uid);
- if (ret)
+ ret = data->soc_revision(pdev, &soc_rev);
+ if (ret) {
+ imx8m_soc_unprepare(pdev);
+ return ret;
+ }
+ }
+ if (data->soc_uid) {
+ ret = data->soc_uid(pdev, soc_uid);
+ if (ret) {
+ imx8m_soc_unprepare(pdev);
return ret;
+ }
}
+ imx8m_soc_unprepare(pdev);
}
soc_dev_attr->revision = imx8_revision(dev, soc_rev);
if (!soc_dev_attr->revision)
return -ENOMEM;
- soc_dev_attr->serial_number = devm_kasprintf(dev, GFP_KERNEL, "%016llX", soc_uid);
+ if (soc_uid[1])
+ soc_dev_attr->serial_number = devm_kasprintf(dev, GFP_KERNEL, "%016llX%016llX",
+ soc_uid[1], soc_uid[0]);
+ else
+ soc_dev_attr->serial_number = devm_kasprintf(dev, GFP_KERNEL, "%016llX",
+ soc_uid[0]);
if (!soc_dev_attr->serial_number)
return -ENOMEM;
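Worked example of the resulting serial_number strings (UID values invented for illustration): a 64-bit UID of 0x1122334455667788 yields "1122334455667788", while an i.MX8MP-style 128-bit UID with soc_uid[1] = 0x99AABBCCDDEEFF00 yields "99AABBCCDDEEFF001122334455667788" (high word first, 32 hex digits total):

char buf[33];	/* 32 hex digits + NUL */

if (soc_uid[1])
	snprintf(buf, sizeof(buf), "%016llX%016llX", soc_uid[1], soc_uid[0]);
else
	snprintf(buf, sizeof(buf), "%016llX", soc_uid[0]);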
diff --git a/drivers/soc/mediatek/mtk-dvfsrc.c b/drivers/soc/mediatek/mtk-dvfsrc.c
index 83bf46fdcf2d..41add5636b03 100644
--- a/drivers/soc/mediatek/mtk-dvfsrc.c
+++ b/drivers/soc/mediatek/mtk-dvfsrc.c
@@ -446,6 +446,46 @@ static int mtk_dvfsrc_probe(struct platform_device *pdev)
return 0;
}
+static const struct dvfsrc_bw_constraints dvfsrc_bw_constr_v1 = { 0, 0, 0 };
+static const struct dvfsrc_bw_constraints dvfsrc_bw_constr_v2 = {
+ .max_dram_nom_bw = 255,
+ .max_dram_peak_bw = 255,
+ .max_dram_hrt_bw = 1023,
+};
+
+static const struct dvfsrc_opp dvfsrc_opp_mt6893_lp4[] = {
+ { 0, 0 }, { 1, 0 }, { 2, 0 }, { 3, 0 },
+ { 0, 1 }, { 1, 1 }, { 2, 1 }, { 3, 1 },
+ { 0, 2 }, { 1, 2 }, { 2, 2 }, { 3, 2 },
+ { 0, 3 }, { 1, 3 }, { 2, 3 }, { 3, 3 },
+ { 1, 4 }, { 2, 4 }, { 3, 4 }, { 2, 5 },
+ { 3, 5 }, { 3, 6 }, { 4, 6 }, { 4, 7 },
+};
+
+static const struct dvfsrc_opp_desc dvfsrc_opp_mt6893_desc[] = {
+ [0] = {
+ .opps = dvfsrc_opp_mt6893_lp4,
+ .num_opp = ARRAY_SIZE(dvfsrc_opp_mt6893_lp4),
+ }
+};
+
+static const struct dvfsrc_soc_data mt6893_data = {
+ .opps_desc = dvfsrc_opp_mt6893_desc,
+ .regs = dvfsrc_mt8195_regs,
+ .get_target_level = dvfsrc_get_target_level_v2,
+ .get_current_level = dvfsrc_get_current_level_v2,
+ .get_vcore_level = dvfsrc_get_vcore_level_v2,
+ .get_vscp_level = dvfsrc_get_vscp_level_v2,
+ .set_dram_bw = dvfsrc_set_dram_bw_v1,
+ .set_dram_peak_bw = dvfsrc_set_dram_peak_bw_v1,
+ .set_dram_hrt_bw = dvfsrc_set_dram_hrt_bw_v1,
+ .set_vcore_level = dvfsrc_set_vcore_level_v2,
+ .set_vscp_level = dvfsrc_set_vscp_level_v2,
+ .wait_for_opp_level = dvfsrc_wait_for_opp_level_v2,
+ .wait_for_vcore_level = dvfsrc_wait_for_vcore_level_v1,
+ .bw_constraints = &dvfsrc_bw_constr_v2,
+};
+
static const struct dvfsrc_opp dvfsrc_opp_mt8183_lp4[] = {
{ 0, 0 }, { 0, 1 }, { 0, 2 }, { 1, 2 },
};
@@ -469,8 +509,6 @@ static const struct dvfsrc_opp_desc dvfsrc_opp_mt8183_desc[] = {
}
};
-static const struct dvfsrc_bw_constraints dvfsrc_bw_constr_mt8183 = { 0, 0, 0 };
-
static const struct dvfsrc_soc_data mt8183_data = {
.opps_desc = dvfsrc_opp_mt8183_desc,
.regs = dvfsrc_mt8183_regs,
@@ -482,7 +520,7 @@ static const struct dvfsrc_soc_data mt8183_data = {
.set_vcore_level = dvfsrc_set_vcore_level_v1,
.wait_for_opp_level = dvfsrc_wait_for_opp_level_v1,
.wait_for_vcore_level = dvfsrc_wait_for_vcore_level_v1,
- .bw_constraints = &dvfsrc_bw_constr_mt8183,
+ .bw_constraints = &dvfsrc_bw_constr_v1,
};
static const struct dvfsrc_opp dvfsrc_opp_mt8195_lp4[] = {
@@ -501,12 +539,6 @@ static const struct dvfsrc_opp_desc dvfsrc_opp_mt8195_desc[] = {
}
};
-static const struct dvfsrc_bw_constraints dvfsrc_bw_constr_mt8195 = {
- .max_dram_nom_bw = 255,
- .max_dram_peak_bw = 255,
- .max_dram_hrt_bw = 1023,
-};
-
static const struct dvfsrc_soc_data mt8195_data = {
.opps_desc = dvfsrc_opp_mt8195_desc,
.regs = dvfsrc_mt8195_regs,
@@ -521,10 +553,11 @@ static const struct dvfsrc_soc_data mt8195_data = {
.set_vscp_level = dvfsrc_set_vscp_level_v2,
.wait_for_opp_level = dvfsrc_wait_for_opp_level_v2,
.wait_for_vcore_level = dvfsrc_wait_for_vcore_level_v1,
- .bw_constraints = &dvfsrc_bw_constr_mt8195,
+ .bw_constraints = &dvfsrc_bw_constr_v2,
};
static const struct of_device_id mtk_dvfsrc_of_match[] = {
+ { .compatible = "mediatek,mt6893-dvfsrc", .data = &mt6893_data },
{ .compatible = "mediatek,mt8183-dvfsrc", .data = &mt8183_data },
{ .compatible = "mediatek,mt8195-dvfsrc", .data = &mt8195_data },
{ /* sentinel */ }
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index 56823b6a2fac..192edc3f64dc 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -35,6 +35,11 @@
#define ATTR0_RES_WAYS_MASK GENMASK(15, 0)
#define ATTR0_BONUS_WAYS_MASK GENMASK(31, 16)
#define ATTR0_BONUS_WAYS_SHIFT 16
+#define ATTR2_PROBE_TARGET_WAYS_MASK BIT(4)
+#define ATTR2_FIXED_SIZE_MASK BIT(8)
+#define ATTR2_PRIORITY_MASK GENMASK(14, 12)
+#define ATTR2_PARENT_SCID_MASK GENMASK(21, 16)
+#define ATTR2_IN_A_GROUP_MASK BIT(24)
#define LLCC_STATUS_READ_DELAY 100
#define CACHE_LINE_SIZE_SHIFT 6
@@ -49,6 +54,10 @@
#define LLCC_TRP_ATTR0_CFGn(n) (0x21000 + SZ_8 * n)
#define LLCC_TRP_ATTR1_CFGn(n) (0x21004 + SZ_8 * n)
#define LLCC_TRP_ATTR2_CFGn(n) (0x21100 + SZ_4 * n)
+#define LLCC_V6_TRP_ATTR0_CFGn(n) (cfg->reg_offset[LLCC_TRP_ATTR0_CFG] + SZ_64 * (n))
+#define LLCC_V6_TRP_ATTR1_CFGn(n) (cfg->reg_offset[LLCC_TRP_ATTR1_CFG] + SZ_64 * (n))
+#define LLCC_V6_TRP_ATTR2_CFGn(n) (cfg->reg_offset[LLCC_TRP_ATTR2_CFG] + SZ_64 * (n))
+#define LLCC_V6_TRP_ATTR3_CFGn(n) (cfg->reg_offset[LLCC_TRP_ATTR3_CFG] + SZ_64 * (n))
#define LLCC_TRP_SCID_DIS_CAP_ALLOC 0x21f00
#define LLCC_TRP_PCB_ACT 0x21f04
@@ -66,6 +75,7 @@
#define LLCC_VERSION_2_0_0_0 0x02000000
#define LLCC_VERSION_2_1_0_0 0x02010000
#define LLCC_VERSION_4_1_0_0 0x04010000
+#define LLCC_VERSION_6_0_0_0 0x06000000
/**
* struct llcc_slice_config - Data associated with the llcc slice
@@ -106,6 +116,7 @@
* ovcap_en.
* @vict_prio: When current scid is under-capacity, allocate over other
* lower-than victim priority-line threshold scid.
+ * @parent_slice_id: For grouped slices, specifies the slice id of the parent.
*/
struct llcc_slice_config {
u32 usecase_id;
@@ -130,6 +141,7 @@ struct llcc_slice_config {
bool ovcap_en;
bool ovcap_prio;
bool vict_prio;
+ u32 parent_slice_id;
};
struct qcom_llcc_config {
@@ -153,6 +165,21 @@ struct qcom_sct_config {
enum llcc_reg_offset {
LLCC_COMMON_HW_INFO,
LLCC_COMMON_STATUS0,
+ LLCC_TRP_ATTR0_CFG,
+ LLCC_TRP_ATTR1_CFG,
+ LLCC_TRP_ATTR2_CFG,
+ LLCC_TRP_ATTR3_CFG,
+ LLCC_TRP_SID_DIS_CAP_ALLOC,
+ LLCC_TRP_ALGO_STALE_EN,
+ LLCC_TRP_ALGO_STALE_CAP_EN,
+ LLCC_TRP_ALGO_MRU0,
+ LLCC_TRP_ALGO_MRU1,
+ LLCC_TRP_ALGO_ALLOC0,
+ LLCC_TRP_ALGO_ALLOC1,
+ LLCC_TRP_ALGO_ALLOC2,
+ LLCC_TRP_ALGO_ALLOC3,
+ LLCC_TRP_WRS_EN,
+ LLCC_TRP_WRS_CACHEABLE_EN,
};
static const struct llcc_slice_config ipq5424_data[] = {
@@ -2662,6 +2689,263 @@ static const struct llcc_slice_config sm8650_data[] = {
},
};
+static const struct llcc_slice_config sm8750_data[] = {
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 5120,
+ .priority = 1,
+ .bonus_ways = 0xffffffff,
+ .activate_on_init = true,
+ .write_scid_en = true,
+ }, {
+ .usecase_id = LLCC_MDMHPFX,
+ .slice_id = 24,
+ .max_cap = 1024,
+ .priority = 5,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_VIDSC0,
+ .slice_id = 2,
+ .max_cap = 512,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_AUDIO,
+ .slice_id = 35,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_MDMHPGRW,
+ .slice_id = 25,
+ .max_cap = 1024,
+ .priority = 5,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_MODHW,
+ .slice_id = 26,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_CMPT,
+ .slice_id = 34,
+ .max_cap = 4096,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 9,
+ .max_cap = 5632,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .write_scid_en = true,
+ .write_scid_cacheable_en = true
+ }, {
+ .usecase_id = LLCC_MMUHWT,
+ .slice_id = 18,
+ .max_cap = 768,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_DISP,
+ .slice_id = 16,
+ .max_cap = 7168,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .cache_mode = 2,
+ .stale_en = true,
+ }, {
+ .usecase_id = LLCC_VIDFW,
+ .slice_id = 17,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_CAMFW,
+ .slice_id = 20,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_MDMPNG,
+ .slice_id = 27,
+ .max_cap = 256,
+ .priority = 5,
+ .fixed_size = true,
+ .bonus_ways = 0xf0000000,
+ }, {
+ .usecase_id = LLCC_AUDHW,
+ .slice_id = 22,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_CVP,
+ .slice_id = 8,
+ .max_cap = 800,
+ .priority = 5,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_MODPE,
+ .slice_id = 29,
+ .max_cap = 256,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xf0000000,
+ .alloc_oneway_en = true,
+ }, {
+ .usecase_id = LLCC_WRCACHE,
+ .slice_id = 31,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_CVPFW,
+ .slice_id = 19,
+ .max_cap = 64,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_CMPTHCP,
+ .slice_id = 15,
+ .max_cap = 256,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_LCPDARE,
+ .slice_id = 30,
+ .max_cap = 128,
+ .priority = 5,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .activate_on_init = true,
+ .alloc_oneway_en = true,
+ }, {
+ .usecase_id = LLCC_AENPU,
+ .slice_id = 3,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .cache_mode = 2,
+ }, {
+ .usecase_id = LLCC_ISLAND1,
+ .slice_id = 12,
+ .max_cap = 7936,
+ .priority = 7,
+ .fixed_size = true,
+ .bonus_ways = 0x7fffffff,
+ }, {
+ .usecase_id = LLCC_DISP_WB,
+ .slice_id = 23,
+ .max_cap = 512,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_VIDVSP,
+ .slice_id = 4,
+ .max_cap = 256,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_VIDDEC,
+ .slice_id = 5,
+ .max_cap = 6144,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .cache_mode = 2,
+ .ovcap_prio = true,
+ .parent_slice_id = 33,
+ }, {
+ .usecase_id = LLCC_CAMOFE,
+ .slice_id = 33,
+ .max_cap = 6144,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .stale_en = true,
+ .ovcap_prio = true,
+ .parent_slice_id = 33,
+ }, {
+ .usecase_id = LLCC_CAMRTIP,
+ .slice_id = 13,
+ .max_cap = 1024,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .stale_en = true,
+ .ovcap_prio = true,
+ .parent_slice_id = 33,
+ }, {
+ .usecase_id = LLCC_CAMSRTIP,
+ .slice_id = 14,
+ .max_cap = 6144,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .stale_en = true,
+ .ovcap_prio = true,
+ .parent_slice_id = 33,
+ }, {
+ .usecase_id = LLCC_CAMRTRF,
+ .slice_id = 7,
+ .max_cap = 3584,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .stale_en = true,
+ .ovcap_prio = true,
+ .parent_slice_id = 33,
+ }, {
+ .usecase_id = LLCC_CAMSRTRF,
+ .slice_id = 21,
+ .max_cap = 6144,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .stale_en = true,
+ .ovcap_prio = true,
+ .parent_slice_id = 33,
+ }, {
+ .usecase_id = LLCC_CPUSSMPAM,
+ .slice_id = 6,
+ .max_cap = 2048,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .activate_on_init = true,
+ .write_scid_en = true,
+ },
+};
+
static const struct llcc_slice_config qcs615_data[] = {
{
.usecase_id = LLCC_CPUSS,
@@ -3161,6 +3445,33 @@ static const struct llcc_edac_reg_offset llcc_v2_1_edac_reg_offset = {
.drp_ecc_db_err_syn0 = 0x52120,
};
+static const struct llcc_edac_reg_offset llcc_v6_edac_reg_offset = {
+ .trp_ecc_error_status0 = 0x47448,
+ .trp_ecc_error_status1 = 0x47450,
+ .trp_ecc_sb_err_syn0 = 0x47490,
+ .trp_ecc_db_err_syn0 = 0x474d0,
+ .trp_ecc_error_cntr_clear = 0x47444,
+ .trp_interrupt_0_status = 0x47600,
+ .trp_interrupt_0_clear = 0x47604,
+ .trp_interrupt_0_enable = 0x47608,
+
+ /* LLCC Common registers */
+ .cmn_status0 = 0x6400c,
+ .cmn_interrupt_0_enable = 0x6401c,
+ .cmn_interrupt_2_enable = 0x6403c,
+
+ /* LLCC DRP registers */
+ .drp_ecc_error_cfg = 0x80000,
+ .drp_ecc_error_cntr_clear = 0x80004,
+ .drp_interrupt_status = 0x80020,
+ .drp_interrupt_clear = 0x80028,
+ .drp_interrupt_enable = 0x8002c,
+ .drp_ecc_error_status0 = 0x820f4,
+ .drp_ecc_error_status1 = 0x820f8,
+ .drp_ecc_sb_err_syn0 = 0x820fc,
+ .drp_ecc_db_err_syn0 = 0x82120,
+};
+
/* LLCC register offset starting from v1.0.0 */
static const u32 llcc_v1_reg_offset[] = {
[LLCC_COMMON_HW_INFO] = 0x00030000,
@@ -3173,6 +3484,27 @@ static const u32 llcc_v2_1_reg_offset[] = {
[LLCC_COMMON_STATUS0] = 0x0003400c,
};
+/* LLCC register offset starting from v6.0.0 */
+static const u32 llcc_v6_reg_offset[] = {
+ [LLCC_COMMON_HW_INFO] = 0x00064000,
+ [LLCC_COMMON_STATUS0] = 0x0006400c,
+ [LLCC_TRP_ATTR0_CFG] = 0x00041000,
+ [LLCC_TRP_ATTR1_CFG] = 0x00041008,
+ [LLCC_TRP_ATTR2_CFG] = 0x00041010,
+ [LLCC_TRP_ATTR3_CFG] = 0x00041014,
+ [LLCC_TRP_SID_DIS_CAP_ALLOC] = 0x00042000,
+ [LLCC_TRP_ALGO_STALE_EN] = 0x00042008,
+ [LLCC_TRP_ALGO_STALE_CAP_EN] = 0x00042010,
+ [LLCC_TRP_ALGO_MRU0] = 0x00042018,
+ [LLCC_TRP_ALGO_MRU1] = 0x00042020,
+ [LLCC_TRP_ALGO_ALLOC0] = 0x00042028,
+ [LLCC_TRP_ALGO_ALLOC1] = 0x00042030,
+ [LLCC_TRP_ALGO_ALLOC2] = 0x00042038,
+ [LLCC_TRP_ALGO_ALLOC3] = 0x00042040,
+ [LLCC_TRP_WRS_EN] = 0x00042080,
+ [LLCC_TRP_WRS_CACHEABLE_EN] = 0x00042088,
+};
+
static const struct qcom_llcc_config qcs615_cfg[] = {
{
.sct_data = qcs615_data,
@@ -3379,6 +3711,16 @@ static const struct qcom_llcc_config sm8650_cfg[] = {
},
};
+static const struct qcom_llcc_config sm8750_cfg[] = {
+ {
+ .sct_data = sm8750_data,
+ .size = ARRAY_SIZE(sm8750_data),
+ .skip_llcc_cfg = false,
+ .reg_offset = llcc_v6_reg_offset,
+ .edac_reg_offset = &llcc_v6_edac_reg_offset,
+ },
+};
+
static const struct qcom_llcc_config x1e80100_cfg[] = {
{
.sct_data = x1e80100_data,
@@ -3489,6 +3831,11 @@ static const struct qcom_sct_config sm8650_cfgs = {
.num_config = ARRAY_SIZE(sm8650_cfg),
};
+static const struct qcom_sct_config sm8750_cfgs = {
+ .llcc_config = sm8750_cfg,
+ .num_config = ARRAY_SIZE(sm8750_cfg),
+};
+
static const struct qcom_sct_config x1e80100_cfgs = {
.llcc_config = x1e80100_cfg,
.num_config = ARRAY_SIZE(x1e80100_cfg),
@@ -3869,6 +4216,139 @@ static int _qcom_llcc_cfg_program(const struct llcc_slice_config *config,
return ret;
}
+static int _qcom_llcc_cfg_program_v6(const struct llcc_slice_config *config,
+ const struct qcom_llcc_config *cfg)
+{
+ u32 stale_en, stale_cap_en, mru_uncap_en, mru_rollover;
+ u32 alloc_oneway_en, ovcap_en, ovcap_prio, vict_prio;
+ u32 attr0_cfg, attr1_cfg, attr2_cfg, attr3_cfg;
+ u32 attr0_val, attr1_val, attr2_val, attr3_val;
+ u32 slice_offset, reg_offset;
+ struct llcc_slice_desc *desc;
+ u32 wren, wr_cache_en;
+ int ret;
+
+ attr0_cfg = LLCC_V6_TRP_ATTR0_CFGn(config->slice_id);
+ attr1_cfg = LLCC_V6_TRP_ATTR1_CFGn(config->slice_id);
+ attr2_cfg = LLCC_V6_TRP_ATTR2_CFGn(config->slice_id);
+ attr3_cfg = LLCC_V6_TRP_ATTR3_CFGn(config->slice_id);
+
+ attr0_val = config->res_ways;
+ attr1_val = config->bonus_ways;
+ attr2_val = config->cache_mode;
+ attr2_val |= FIELD_PREP(ATTR2_PROBE_TARGET_WAYS_MASK, config->probe_target_ways);
+ attr2_val |= FIELD_PREP(ATTR2_FIXED_SIZE_MASK, config->fixed_size);
+ attr2_val |= FIELD_PREP(ATTR2_PRIORITY_MASK, config->priority);
+
+ if (config->parent_slice_id && config->fixed_size) {
+ attr2_val |= FIELD_PREP(ATTR2_PARENT_SCID_MASK, config->parent_slice_id);
+ attr2_val |= ATTR2_IN_A_GROUP_MASK;
+ }
+
+ attr3_val = MAX_CAP_TO_BYTES(config->max_cap);
+ attr3_val /= drv_data->num_banks;
+ attr3_val >>= CACHE_LINE_SIZE_SHIFT;
+
+ ret = regmap_write(drv_data->bcast_regmap, attr0_cfg, attr0_val);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(drv_data->bcast_regmap, attr1_cfg, attr1_val);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(drv_data->bcast_regmap, attr2_cfg, attr2_val);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(drv_data->bcast_regmap, attr3_cfg, attr3_val);
+ if (ret)
+ return ret;
+
+ slice_offset = config->slice_id % 32;
+ reg_offset = (config->slice_id / 32) * 4;
+
+ wren = config->write_scid_en << slice_offset;
+ ret = regmap_update_bits(drv_data->bcast_regmap,
+ cfg->reg_offset[LLCC_TRP_WRS_EN] + reg_offset,
+ BIT(slice_offset), wren);
+ if (ret)
+ return ret;
+
+ wr_cache_en = config->write_scid_cacheable_en << slice_offset;
+ ret = regmap_update_bits(drv_data->bcast_regmap,
+ cfg->reg_offset[LLCC_TRP_WRS_CACHEABLE_EN] + reg_offset,
+ BIT(slice_offset), wr_cache_en);
+ if (ret)
+ return ret;
+
+ stale_en = config->stale_en << slice_offset;
+ ret = regmap_update_bits(drv_data->bcast_regmap,
+ cfg->reg_offset[LLCC_TRP_ALGO_STALE_EN] + reg_offset,
+ BIT(slice_offset), stale_en);
+ if (ret)
+ return ret;
+
+ stale_cap_en = config->stale_cap_en << slice_offset;
+ ret = regmap_update_bits(drv_data->bcast_regmap,
+ cfg->reg_offset[LLCC_TRP_ALGO_STALE_CAP_EN] + reg_offset,
+ BIT(slice_offset), stale_cap_en);
+ if (ret)
+ return ret;
+
+ mru_uncap_en = config->mru_uncap_en << slice_offset;
+ ret = regmap_update_bits(drv_data->bcast_regmap,
+ cfg->reg_offset[LLCC_TRP_ALGO_MRU0] + reg_offset,
+ BIT(slice_offset), mru_uncap_en);
+ if (ret)
+ return ret;
+
+ mru_rollover = config->mru_rollover << slice_offset;
+ ret = regmap_update_bits(drv_data->bcast_regmap,
+ cfg->reg_offset[LLCC_TRP_ALGO_MRU1] + reg_offset,
+ BIT(slice_offset), mru_rollover);
+ if (ret)
+ return ret;
+
+ alloc_oneway_en = config->alloc_oneway_en << slice_offset;
+ ret = regmap_update_bits(drv_data->bcast_regmap,
+ cfg->reg_offset[LLCC_TRP_ALGO_ALLOC0] + reg_offset,
+ BIT(slice_offset), alloc_oneway_en);
+ if (ret)
+ return ret;
+
+ ovcap_en = config->ovcap_en << slice_offset;
+ ret = regmap_update_bits(drv_data->bcast_regmap,
+ cfg->reg_offset[LLCC_TRP_ALGO_ALLOC1] + reg_offset,
+ BIT(slice_offset), ovcap_en);
+ if (ret)
+ return ret;
+
+ ovcap_prio = config->ovcap_prio << slice_offset;
+ ret = regmap_update_bits(drv_data->bcast_regmap,
+ cfg->reg_offset[LLCC_TRP_ALGO_ALLOC2] + reg_offset,
+ BIT(slice_offset), ovcap_prio);
+ if (ret)
+ return ret;
+
+ vict_prio = config->vict_prio << slice_offset;
+ ret = regmap_update_bits(drv_data->bcast_regmap,
+ cfg->reg_offset[LLCC_TRP_ALGO_ALLOC3] + reg_offset,
+ BIT(slice_offset), vict_prio);
+ if (ret)
+ return ret;
+
+ if (config->activate_on_init) {
+ desc = llcc_slice_getd(config->usecase_id);
+ if (PTR_ERR_OR_ZERO(desc))
+ return -EINVAL;
+
+ ret = llcc_slice_activate(desc);
+ }
+
+ return ret;
+}
+
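
The v6 programming path above packs one bit per slice into each TRP
algorithm register, 32 slices per 32-bit word: slice_offset picks the bit
and reg_offset picks the word. It also encodes the slice grouping used by
the sm8750 camera slices, whose parent_slice_id of 33 (LLCC_CAMOFE) lands
in ATTR2 via ATTR2_PARENT_SCID_MASK plus ATTR2_IN_A_GROUP_MASK. A worked
example of the bit addressing (standalone sketch, not driver code):

    #include <stdio.h>

    int main(void)
    {
        unsigned int slice_id = 35;                     /* LLCC_AUDIO above */
        unsigned int slice_offset = slice_id % 32;      /* bit 3 */
        unsigned int reg_offset = (slice_id / 32) * 4;  /* second 32-bit word */

        printf("slice %u -> register +0x%x, bit %u\n",
               slice_id, reg_offset, slice_offset);
        return 0;
    }
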
static int qcom_llcc_cfg_program(struct platform_device *pdev,
const struct qcom_llcc_config *cfg)
{
@@ -3880,10 +4360,18 @@ static int qcom_llcc_cfg_program(struct platform_device *pdev,
sz = drv_data->cfg_size;
llcc_table = drv_data->cfg;
- for (i = 0; i < sz; i++) {
- ret = _qcom_llcc_cfg_program(&llcc_table[i], cfg);
- if (ret)
- return ret;
+ if (drv_data->version >= LLCC_VERSION_6_0_0_0) {
+ for (i = 0; i < sz; i++) {
+ ret = _qcom_llcc_cfg_program_v6(&llcc_table[i], cfg);
+ if (ret)
+ return ret;
+ }
+ } else {
+ for (i = 0; i < sz; i++) {
+ ret = _qcom_llcc_cfg_program(&llcc_table[i], cfg);
+ if (ret)
+ return ret;
+ }
}
return ret;
@@ -4102,6 +4590,7 @@ static const struct of_device_id qcom_llcc_of_match[] = {
{ .compatible = "qcom,sm8450-llcc", .data = &sm8450_cfgs },
{ .compatible = "qcom,sm8550-llcc", .data = &sm8550_cfgs },
{ .compatible = "qcom,sm8650-llcc", .data = &sm8650_cfgs },
+ { .compatible = "qcom,sm8750-llcc", .data = &sm8750_cfgs },
{ .compatible = "qcom,x1e80100-llcc", .data = &x1e80100_cfgs },
{ }
};
diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c
index cde19cdfd3c7..0a6d325b195c 100644
--- a/drivers/soc/qcom/pmic_glink.c
+++ b/drivers/soc/qcom/pmic_glink.c
@@ -371,15 +371,11 @@ static void pmic_glink_remove(struct platform_device *pdev)
__pmic_glink = NULL;
}
-static const unsigned long pmic_glink_sc8280xp_client_mask = BIT(PMIC_GLINK_CLIENT_BATT) |
- BIT(PMIC_GLINK_CLIENT_ALTMODE);
-
static const unsigned long pmic_glink_sm8450_client_mask = BIT(PMIC_GLINK_CLIENT_BATT) |
BIT(PMIC_GLINK_CLIENT_ALTMODE) |
BIT(PMIC_GLINK_CLIENT_UCSI);
static const struct of_device_id pmic_glink_of_match[] = {
- { .compatible = "qcom,sc8280xp-pmic-glink", .data = &pmic_glink_sc8280xp_client_mask },
{ .compatible = "qcom,pmic-glink", .data = &pmic_glink_sm8450_client_mask },
{}
};
diff --git a/drivers/soc/qcom/pmic_glink_altmode.c b/drivers/soc/qcom/pmic_glink_altmode.c
index bd06ce161804..7f11acd33323 100644
--- a/drivers/soc/qcom/pmic_glink_altmode.c
+++ b/drivers/soc/qcom/pmic_glink_altmode.c
@@ -218,21 +218,29 @@ static void pmic_glink_altmode_worker(struct work_struct *work)
{
struct pmic_glink_altmode_port *alt_port = work_to_altmode_port(work);
struct pmic_glink_altmode *altmode = alt_port->altmode;
+ enum drm_connector_status conn_status;
typec_switch_set(alt_port->typec_switch, alt_port->orientation);
- if (alt_port->svid == USB_TYPEC_DP_SID && alt_port->mode == 0xff)
- pmic_glink_altmode_safe(altmode, alt_port);
- else if (alt_port->svid == USB_TYPEC_DP_SID)
- pmic_glink_altmode_enable_dp(altmode, alt_port, alt_port->mode,
- alt_port->hpd_state, alt_port->hpd_irq);
- else
- pmic_glink_altmode_enable_usb(altmode, alt_port);
+ if (alt_port->svid == USB_TYPEC_DP_SID) {
+ if (alt_port->mode == 0xff) {
+ pmic_glink_altmode_safe(altmode, alt_port);
+ } else {
+ pmic_glink_altmode_enable_dp(altmode, alt_port,
+ alt_port->mode,
+ alt_port->hpd_state,
+ alt_port->hpd_irq);
+ }
- drm_aux_hpd_bridge_notify(&alt_port->bridge->dev,
- alt_port->hpd_state ?
- connector_status_connected :
- connector_status_disconnected);
+ if (alt_port->hpd_state)
+ conn_status = connector_status_connected;
+ else
+ conn_status = connector_status_disconnected;
+
+ drm_aux_hpd_bridge_notify(&alt_port->bridge->dev, conn_status);
+ } else {
+ pmic_glink_altmode_enable_usb(altmode, alt_port);
+ }
pmic_glink_altmode_request(altmode, ALTMODE_PAN_ACK, alt_port->index);
}
diff --git a/drivers/soc/qcom/qcom_pd_mapper.c b/drivers/soc/qcom/qcom_pd_mapper.c
index 1d1c438be3e7..3abea241b1c4 100644
--- a/drivers/soc/qcom/qcom_pd_mapper.c
+++ b/drivers/soc/qcom/qcom_pd_mapper.c
@@ -488,6 +488,16 @@ static const struct qcom_pdm_domain_data *sm6350_domains[] = {
NULL,
};
+static const struct qcom_pdm_domain_data *sm7150_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd,
+ &adsp_sensor_pd,
+ &cdsp_root_pd,
+ &mpss_root_pd_gps,
+ &mpss_wlan_pd,
+ NULL,
+};
+
static const struct qcom_pdm_domain_data *sm8150_domains[] = {
&adsp_audio_pd,
&adsp_root_pd,
@@ -565,6 +575,7 @@ static const struct of_device_id qcom_pdm_domains[] __maybe_unused = {
{ .compatible = "qcom,sm4250", .data = sm6115_domains, },
{ .compatible = "qcom,sm6115", .data = sm6115_domains, },
{ .compatible = "qcom,sm6350", .data = sm6350_domains, },
+ { .compatible = "qcom,sm7150", .data = sm7150_domains, },
{ .compatible = "qcom,sm7225", .data = sm6350_domains, },
{ .compatible = "qcom,sm7325", .data = sc7280_domains, },
{ .compatible = "qcom,sm8150", .data = sm8150_domains, },
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index 592819701809..cf425930539e 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -86,7 +86,7 @@
#define SMEM_GLOBAL_HOST 0xfffe
/* Max number of processors/hosts in a system */
-#define SMEM_HOST_COUNT 20
+#define SMEM_HOST_COUNT 25
/**
* struct smem_proc_comm - proc_comm communication struct (legacy)
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
index 8c8878bc87f5..cb515c2340c1 100644
--- a/drivers/soc/qcom/smp2p.c
+++ b/drivers/soc/qcom/smp2p.c
@@ -575,7 +575,7 @@ static int qcom_smp2p_probe(struct platform_device *pdev)
smp2p->mbox_client.knows_txdone = true;
smp2p->mbox_chan = mbox_request_channel(&smp2p->mbox_client, 0);
if (IS_ERR(smp2p->mbox_chan)) {
- if (PTR_ERR(smp2p->mbox_chan) != -ENODEV)
+ if (PTR_ERR(smp2p->mbox_chan) != -ENOENT)
return PTR_ERR(smp2p->mbox_chan);
smp2p->mbox_chan = NULL;
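
The one-line change above tracks the mailbox core's error convention: a
channel that is simply not wired up is reported distinctly from a real
failure, and smp2p keeps treating the mailbox as optional in that case.
The resulting pattern, sketched outside the driver (assuming the same
semantics):

    struct mbox_chan *chan;

    chan = mbox_request_channel(&client, 0);
    if (IS_ERR(chan)) {
        /* -ENOENT: no mailbox described in DT; fall back silently */
        if (PTR_ERR(chan) != -ENOENT)
            return PTR_ERR(chan);
        chan = NULL;
    }
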
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 18d7f1be9093..8c4147737c35 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -444,6 +444,7 @@ static const struct soc_id soc_id[] = {
{ qcom_board_id(IPQ5302) },
{ qcom_board_id(QCS8550) },
{ qcom_board_id(QCM8550) },
+ { qcom_board_id(SM8750) },
{ qcom_board_id(IPQ5300) },
{ qcom_board_id(IPQ5321) },
{ qcom_board_id(IPQ5424) },
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 4990b85d7df7..fbc3b69d21a7 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -393,6 +393,13 @@ config ARCH_R9A09G047
help
This enables support for the Renesas RZ/G3E SoC variants.
+config ARCH_R9A09G056
+ bool "ARM64 Platform support for RZ/V2N"
+ default y if ARCH_RENESAS
+ select SYS_R9A09G056
+ help
+ This enables support for the Renesas RZ/V2N SoC variants.
+
config ARCH_R9A09G057
bool "ARM64 Platform support for RZ/V2H(P)"
default y if ARCH_RENESAS
@@ -439,6 +446,10 @@ config SYS_R9A09G047
bool "Renesas RZ/G3E System controller support" if COMPILE_TEST
select SYSC_RZ
+config SYS_R9A09G056
+ bool "Renesas RZ/V2N System controller support" if COMPILE_TEST
+ select SYSC_RZ
+
config SYS_R9A09G057
bool "Renesas RZ/V2H System controller support" if COMPILE_TEST
select SYSC_RZ
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
index 81d4c5726e4c..3bdcc6a395d5 100644
--- a/drivers/soc/renesas/Makefile
+++ b/drivers/soc/renesas/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_ARCH_R9A06G032) += r9a06g032-smp.o
endif
obj-$(CONFIG_SYSC_R9A08G045) += r9a08g045-sysc.o
obj-$(CONFIG_SYS_R9A09G047) += r9a09g047-sys.o
+obj-$(CONFIG_SYS_R9A09G056) += r9a09g056-sys.o
obj-$(CONFIG_SYS_R9A09G057) += r9a09g057-sys.o
# Family
diff --git a/drivers/soc/renesas/r9a09g056-sys.c b/drivers/soc/renesas/r9a09g056-sys.c
new file mode 100644
index 000000000000..3ad1422eba36
--- /dev/null
+++ b/drivers/soc/renesas/r9a09g056-sys.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RZ/V2N System controller (SYS) driver
+ *
+ * Copyright (C) 2025 Renesas Electronics Corp.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+
+#include "rz-sysc.h"
+
+/* Register Offsets */
+#define SYS_LSI_MODE 0x300
+#define SYS_LSI_MODE_SEC_EN BIT(16)
+/*
+ * BOOTPLLCA[1:0]
+ * [0,0] => 1.1GHz
+ * [0,1] => 1.5GHz
+ * [1,0] => 1.6GHz
+ * [1,1] => 1.7GHz
+ */
+#define SYS_LSI_MODE_STAT_BOOTPLLCA55 GENMASK(12, 11)
+#define SYS_LSI_MODE_CA55_1_7GHZ 0x3
+
+#define SYS_LSI_PRR 0x308
+#define SYS_LSI_PRR_GPU_DIS BIT(0)
+#define SYS_LSI_PRR_ISP_DIS BIT(4)
+
+#define SYS_RZV2N_FEATURE_G31 BIT(0)
+#define SYS_RZV2N_FEATURE_C55 BIT(1)
+#define SYS_RZV2N_FEATURE_SEC BIT(2)
+
+static void rzv2n_sys_print_id(struct device *dev,
+ void __iomem *sysc_base,
+ struct soc_device_attribute *soc_dev_attr)
+{
+ u32 prr_val, mode_val;
+ u8 feature_flags;
+
+ prr_val = readl(sysc_base + SYS_LSI_PRR);
+ mode_val = readl(sysc_base + SYS_LSI_MODE);
+
+ /* Check GPU, ISP and Cryptographic configuration */
+ feature_flags = !(prr_val & SYS_LSI_PRR_GPU_DIS) ? SYS_RZV2N_FEATURE_G31 : 0;
+ feature_flags |= !(prr_val & SYS_LSI_PRR_ISP_DIS) ? SYS_RZV2N_FEATURE_C55 : 0;
+ feature_flags |= (mode_val & SYS_LSI_MODE_SEC_EN) ? SYS_RZV2N_FEATURE_SEC : 0;
+
+ dev_info(dev, "Detected Renesas %s %sn%d Rev %s%s%s%s%s\n", soc_dev_attr->family,
+ soc_dev_attr->soc_id, 41 + feature_flags, soc_dev_attr->revision,
+ feature_flags ? " with" : "",
+ feature_flags & SYS_RZV2N_FEATURE_G31 ? " GE3D (Mali-G31)" : "",
+ feature_flags & SYS_RZV2N_FEATURE_SEC ? " Cryptographic engine" : "",
+ feature_flags & SYS_RZV2N_FEATURE_C55 ? " ISP (Mali-C55)" : "");
+
+ /* Check CA55 PLL configuration */
+ if (FIELD_GET(SYS_LSI_MODE_STAT_BOOTPLLCA55, mode_val) != SYS_LSI_MODE_CA55_1_7GHZ)
+ dev_warn(dev, "CA55 PLL is not set to 1.7GHz\n");
+}
+
+static const struct rz_sysc_soc_id_init_data rzv2n_sys_soc_id_init_data __initconst = {
+ .family = "RZ/V2N",
+ .id = 0x867d447,
+ .devid_offset = 0x304,
+ .revision_mask = GENMASK(31, 28),
+ .specific_id_mask = GENMASK(27, 0),
+ .print_id = rzv2n_sys_print_id,
+};
+
+const struct rz_sysc_init_data rzv2n_sys_init_data = {
+ .soc_id_init_data = &rzv2n_sys_soc_id_init_data,
+};
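
The three feature bits above feed straight into the printed part number
("%sn%d" with 41 + feature_flags), so a fully fused-off configuration logs
as n41 and a fully featured one as n48. The arithmetic, spelled out
(illustration only):

    unsigned int flags = SYS_RZV2N_FEATURE_G31 | SYS_RZV2N_FEATURE_C55 |
                         SYS_RZV2N_FEATURE_SEC;  /* 0b111 = 7 */
    unsigned int part  = 41 + flags;             /* -> n48 */
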
diff --git a/drivers/soc/renesas/rz-sysc.c b/drivers/soc/renesas/rz-sysc.c
index 14db508f669f..ffa65fb4dade 100644
--- a/drivers/soc/renesas/rz-sysc.c
+++ b/drivers/soc/renesas/rz-sysc.c
@@ -88,6 +88,9 @@ static const struct of_device_id rz_sysc_match[] = {
#ifdef CONFIG_SYS_R9A09G047
{ .compatible = "renesas,r9a09g047-sys", .data = &rzg3e_sys_init_data },
#endif
+#ifdef CONFIG_SYS_R9A09G056
+ { .compatible = "renesas,r9a09g056-sys", .data = &rzv2n_sys_init_data },
+#endif
#ifdef CONFIG_SYS_R9A09G057
{ .compatible = "renesas,r9a09g057-sys", .data = &rzv2h_sys_init_data },
#endif
diff --git a/drivers/soc/renesas/rz-sysc.h b/drivers/soc/renesas/rz-sysc.h
index aa83948c5117..56bc047a1bff 100644
--- a/drivers/soc/renesas/rz-sysc.h
+++ b/drivers/soc/renesas/rz-sysc.h
@@ -42,5 +42,6 @@ struct rz_sysc_init_data {
extern const struct rz_sysc_init_data rzg3e_sys_init_data;
extern const struct rz_sysc_init_data rzg3s_sysc_init_data;
extern const struct rz_sysc_init_data rzv2h_sys_init_data;
+extern const struct rz_sysc_init_data rzv2n_sys_init_data;
#endif /* __SOC_RENESAS_RZ_SYSC_H__ */
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
index c40313886a01..a77288f49d24 100644
--- a/drivers/soc/samsung/exynos-pmu.c
+++ b/drivers/soc/samsung/exynos-pmu.c
@@ -7,6 +7,7 @@
#include <linux/array_size.h>
#include <linux/arm-smccc.h>
+#include <linux/cpuhotplug.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/mfd/core.h>
@@ -33,6 +34,7 @@ struct exynos_pmu_context {
struct device *dev;
const struct exynos_pmu_data *pmu_data;
struct regmap *pmureg;
+ struct regmap *pmuintrgen;
};
void __iomem *pmu_base_addr;
@@ -222,7 +224,8 @@ static const struct regmap_config regmap_smccfg = {
};
static const struct exynos_pmu_data gs101_pmu_data = {
- .pmu_secure = true
+ .pmu_secure = true,
+ .pmu_cpuhp = true,
};
/*
@@ -326,6 +329,59 @@ struct regmap *exynos_get_pmu_regmap_by_phandle(struct device_node *np,
}
EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap_by_phandle);
+/*
+ * CPU_INFORM register hint values which are used by
+ * EL3 firmware (el3mon).
+ */
+#define CPU_INFORM_CLEAR 0
+#define CPU_INFORM_C2 1
+
+static int gs101_cpuhp_pmu_online(unsigned int cpu)
+{
+ unsigned int cpuhint = smp_processor_id();
+ u32 reg, mask;
+
+ /* clear cpu inform hint */
+ regmap_write(pmu_context->pmureg, GS101_CPU_INFORM(cpuhint),
+ CPU_INFORM_CLEAR);
+
+ mask = BIT(cpu);
+
+ regmap_update_bits(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_ENABLE,
+ mask, (0 << cpu));
+
+ regmap_read(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_UPEND, &reg);
+
+ regmap_write(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_CLEAR,
+ reg & mask);
+
+ return 0;
+}
+
+static int gs101_cpuhp_pmu_offline(unsigned int cpu)
+{
+ u32 reg, mask;
+ unsigned int cpuhint = smp_processor_id();
+
+ /* set cpu inform hint */
+ regmap_write(pmu_context->pmureg, GS101_CPU_INFORM(cpuhint),
+ CPU_INFORM_C2);
+
+ mask = BIT(cpu);
+ regmap_update_bits(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_ENABLE,
+ mask, BIT(cpu));
+
+ regmap_read(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_UPEND, &reg);
+ regmap_write(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_CLEAR,
+ reg & mask);
+
+ mask = (BIT(cpu + 8));
+ regmap_read(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_UPEND, &reg);
+ regmap_write(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_CLEAR,
+ reg & mask);
+ return 0;
+}
+
static int exynos_pmu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -378,6 +434,26 @@ static int exynos_pmu_probe(struct platform_device *pdev)
pmu_context->pmureg = regmap;
pmu_context->dev = dev;
+ if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_cpuhp) {
+ pmu_context->pmuintrgen = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "google,pmu-intr-gen-syscon");
+ if (IS_ERR(pmu_context->pmuintrgen)) {
+ /*
+			 * To maintain support for older DTs that don't specify the syscon
+			 * phandle, just issue a warning rather than failing to probe.
+ */
+ dev_warn(&pdev->dev, "pmu-intr-gen syscon unavailable\n");
+ } else {
+ cpuhp_setup_state(CPUHP_BP_PREPARE_DYN,
+ "soc/exynos-pmu:prepare",
+ gs101_cpuhp_pmu_online, NULL);
+
+ cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "soc/exynos-pmu:online",
+ NULL, gs101_cpuhp_pmu_offline);
+ }
+ }
+
if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_init)
pmu_context->pmu_data->pmu_init();
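
For context on the hotplug hooks registered above: with the dynamic states
CPUHP_BP_PREPARE_DYN and CPUHP_AP_ONLINE_DYN the core allocates a state
number and returns it (>= 0) on success; the startup callback then runs for
each CPU being brought up and the teardown callback for each CPU going
down. A hedged sketch with the error handling the hunk omits
(cpuhp_setup_state() can fail):

    int ret;

    ret = cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "soc/exynos-pmu:prepare",
                            gs101_cpuhp_pmu_online, NULL);
    if (ret < 0)
        dev_warn(dev, "failed to register cpuhp prepare hook: %d\n", ret);
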
diff --git a/drivers/soc/samsung/exynos-pmu.h b/drivers/soc/samsung/exynos-pmu.h
index 0a49a2c9a08e..0938bb4fe15f 100644
--- a/drivers/soc/samsung/exynos-pmu.h
+++ b/drivers/soc/samsung/exynos-pmu.h
@@ -22,6 +22,7 @@ struct exynos_pmu_data {
const struct exynos_pmu_conf *pmu_config;
const struct exynos_pmu_conf *pmu_config_extra;
bool pmu_secure;
+ bool pmu_cpuhp;
void (*pmu_init)(void);
void (*powerdown_conf)(enum sys_powerdown);
diff --git a/drivers/soc/sophgo/Kconfig b/drivers/soc/sophgo/Kconfig
new file mode 100644
index 000000000000..45f78b270c91
--- /dev/null
+++ b/drivers/soc/sophgo/Kconfig
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Sophgo SoC drivers
+#
+
+if ARCH_SOPHGO || COMPILE_TEST
+menu "Sophgo SoC drivers"
+
+config SOPHGO_CV1800_RTCSYS
+ tristate "Sophgo CV1800 RTC MFD"
+ select MFD_CORE
+ help
+	  If you say yes here you get support for the RTC MFD driver for
+	  Sophgo CV1800 series SoCs. The RTC module comprises a 32kHz
+	  oscillator, a Power-on-Reset (PoR) sub-module and a HW state
+	  machine that controls chip power-on, power-off and reset.
+	  Furthermore, the 8051 subsystem, including its associated SRAM
+	  block, is located within RTCSYS.
+
+ This driver can also be built as a module. If so, the module will be
+ called cv1800-rtcsys.
+
+config SOPHGO_SG2044_TOPSYS
+ tristate "Sophgo SG2044 TOP syscon driver"
+ select MFD_CORE
+ help
+	  controller device. This driver provides the PLL clock device
+	  for the SoC.
+ for the SoC.
+
+ This driver can also be built as a module. If so, the module
+ will be called sg2044-topsys.
+
+endmenu
+endif
diff --git a/drivers/soc/sophgo/Makefile b/drivers/soc/sophgo/Makefile
new file mode 100644
index 000000000000..27f68df22c4d
--- /dev/null
+++ b/drivers/soc/sophgo/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_SOPHGO_CV1800_RTCSYS) += cv1800-rtcsys.o
+obj-$(CONFIG_SOPHGO_SG2044_TOPSYS) += sg2044-topsys.o
diff --git a/drivers/soc/sophgo/cv1800-rtcsys.c b/drivers/soc/sophgo/cv1800-rtcsys.c
new file mode 100644
index 000000000000..fdae2e2a61c5
--- /dev/null
+++ b/drivers/soc/sophgo/cv1800-rtcsys.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Sophgo CV1800 series SoC RTC subsystem
+ *
+ * The RTC module comprises a 32kHz oscillator, Power-on-Reset (PoR) sub-module,
+ * HW state machine to control chip power-on, power-off and reset. Furthermore,
+ * the 8051 subsystem is located within RTCSYS including associated SRAM block.
+ *
+ * Copyright (C) 2025 Alexander Sverdlin <alexander.sverdlin@gmail.com>
+ *
+ */
+
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/property.h>
+
+static struct resource cv1800_rtcsys_irq_resources[] = {
+ DEFINE_RES_IRQ_NAMED(0, "alarm"),
+};
+
+static const struct mfd_cell cv1800_rtcsys_subdev[] = {
+ {
+ .name = "cv1800b-rtc",
+ .num_resources = 1,
+ .resources = &cv1800_rtcsys_irq_resources[0],
+ },
+};
+
+static int cv1800_rtcsys_probe(struct platform_device *pdev)
+{
+ int irq;
+
+ irq = platform_get_irq_byname(pdev, "alarm");
+ if (irq < 0)
+ return irq;
+ cv1800_rtcsys_irq_resources[0].start = irq;
+ cv1800_rtcsys_irq_resources[0].end = irq;
+
+ return devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
+ cv1800_rtcsys_subdev,
+ ARRAY_SIZE(cv1800_rtcsys_subdev),
+ NULL, 0, NULL);
+}
+
+static const struct of_device_id cv1800_rtcsys_of_match[] = {
+ { .compatible = "sophgo,cv1800b-rtc" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, cv1800_rtcsys_of_match);
+
+static struct platform_driver cv1800_rtcsys_mfd = {
+ .probe = cv1800_rtcsys_probe,
+ .driver = {
+ .name = "cv1800_rtcsys",
+ .of_match_table = cv1800_rtcsys_of_match,
+ },
+};
+module_platform_driver(cv1800_rtcsys_mfd);
+
+MODULE_AUTHOR("Alexander Sverdlin <alexander.sverdlin@gmail.com>");
+MODULE_DESCRIPTION("Sophgo CV1800 series SoC RTC subsystem driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/sophgo/sg2044-topsys.c b/drivers/soc/sophgo/sg2044-topsys.c
new file mode 100644
index 000000000000..179f2620b2a9
--- /dev/null
+++ b/drivers/soc/sophgo/sg2044-topsys.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sophgo SG2044 multi-function system controller driver
+ *
+ * Copyright (C) 2025 Inochi Amaoto <inochiama@gmail.com>
+ */
+
+#include <linux/mfd/core.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/resource.h>
+
+static const struct mfd_cell sg2044_topsys_subdev[] = {
+ {
+ .name = "sg2044-pll",
+ },
+};
+
+static int sg2044_topsys_probe(struct platform_device *pdev)
+{
+ return devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
+ sg2044_topsys_subdev,
+ ARRAY_SIZE(sg2044_topsys_subdev),
+ NULL, 0, NULL);
+}
+
+static const struct of_device_id sg2044_topsys_of_match[] = {
+ { .compatible = "sophgo,sg2044-top-syscon" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sg2044_topsys_of_match);
+
+static struct platform_driver sg2044_topsys_driver = {
+ .probe = sg2044_topsys_probe,
+ .driver = {
+ .name = "sg2044-topsys",
+ .of_match_table = sg2044_topsys_of_match,
+ },
+};
+module_platform_driver(sg2044_topsys_driver);
+
+MODULE_AUTHOR("Inochi Amaoto <inochiama@gmail.com>");
+MODULE_DESCRIPTION("Sophgo SG2044 multi-function system controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c
index 82a15cad1c6c..7602b8a909b0 100644
--- a/drivers/soc/ti/k3-ringacc.c
+++ b/drivers/soc/ti/k3-ringacc.c
@@ -1291,7 +1291,7 @@ struct k3_ringacc *of_k3_ringacc_get_by_phandle(struct device_node *np,
mutex_lock(&k3_ringacc_list_lock);
list_for_each_entry(entry, &k3_ringacc_list, list)
- if (entry->dev->of_node == ringacc_np) {
+ if (device_match_of_node(entry->dev, ringacc_np)) {
ringacc = entry;
break;
}
diff --git a/drivers/soc/ti/k3-socinfo.c b/drivers/soc/ti/k3-socinfo.c
index 704039eb3c07..d716be113c84 100644
--- a/drivers/soc/ti/k3-socinfo.c
+++ b/drivers/soc/ti/k3-socinfo.c
@@ -43,6 +43,7 @@
#define JTAG_ID_PARTNO_AM62AX 0xBB8D
#define JTAG_ID_PARTNO_AM62PX 0xBB9D
#define JTAG_ID_PARTNO_J722S 0xBBA0
+#define JTAG_ID_PARTNO_AM62LX 0xBBA7
static const struct k3_soc_id {
unsigned int id;
@@ -58,6 +59,7 @@ static const struct k3_soc_id {
{ JTAG_ID_PARTNO_AM62AX, "AM62AX" },
{ JTAG_ID_PARTNO_AM62PX, "AM62PX" },
{ JTAG_ID_PARTNO_J722S, "J722S" },
+ { JTAG_ID_PARTNO_AM62LX, "AM62LX" },
};
static const char * const j721e_rev_string_map[] = {
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index ea52425864a9..6e56e7609ccd 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -252,8 +252,7 @@ static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
return qh;
err:
- if (qh->stats)
- free_percpu(qh->stats);
+ free_percpu(qh->stats);
devm_kfree(inst->kdev->dev, qh);
return ERR_PTR(ret);
}
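
The guard removed above was redundant: free_percpu(), like kfree() and
release_firmware() (see the optee hunk further down), accepts NULL and
simply returns. The convention, sketched:

    kfree(ptr);            /* kfree(NULL) is a no-op */
    free_percpu(pcpu);     /* free_percpu(NULL) is a no-op */
    release_firmware(fw);  /* release_firmware(NULL) is a no-op */
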
diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
index 79dde9a7ec63..5845fc652adc 100644
--- a/drivers/soc/ti/wkup_m3_ipc.c
+++ b/drivers/soc/ti/wkup_m3_ipc.c
@@ -644,11 +644,9 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
m3_ipc->mbox = mbox_request_channel(&m3_ipc->mbox_client, 0);
- if (IS_ERR(m3_ipc->mbox)) {
- dev_err(dev, "IPC Request for A8->M3 Channel failed! %ld\n",
- PTR_ERR(m3_ipc->mbox));
- return PTR_ERR(m3_ipc->mbox);
- }
+ if (IS_ERR(m3_ipc->mbox))
+ return dev_err_probe(dev, PTR_ERR(m3_ipc->mbox),
+ "IPC Request for A8->M3 Channel failed!\n");
if (of_property_read_u32(dev->of_node, "ti,rproc", &rproc_phandle)) {
dev_err(&pdev->dev, "could not get rproc phandle\n");
diff --git a/drivers/soc/vt8500/Kconfig b/drivers/soc/vt8500/Kconfig
new file mode 100644
index 000000000000..b4cc0ba1128b
--- /dev/null
+++ b/drivers/soc/vt8500/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+if ARCH_VT8500 || COMPILE_TEST
+
+menu "VIA/WonderMedia SoC drivers"
+
+config WMT_SOCINFO
+ bool "VIA/WonderMedia SoC Information driver"
+ default ARCH_VT8500
+ select SOC_BUS
+ help
+ Say yes to support decoding of VIA/WonderMedia system configuration
+	  register information. This currently includes just the chip ID
+	  register, which helps identify the exact hardware revision of the
+	  SoC the kernel is running on (to know whether any revision-specific
+	  quirks are required).
+
+endmenu
+
+endif
diff --git a/drivers/soc/vt8500/Makefile b/drivers/soc/vt8500/Makefile
new file mode 100644
index 000000000000..05964c5f2890
--- /dev/null
+++ b/drivers/soc/vt8500/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_WMT_SOCINFO) += wmt-socinfo.o
diff --git a/drivers/soc/vt8500/wmt-socinfo.c b/drivers/soc/vt8500/wmt-socinfo.c
new file mode 100644
index 000000000000..461f8c1ae56e
--- /dev/null
+++ b/drivers/soc/vt8500/wmt-socinfo.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2025 Alexey Charkov <alchark@gmail.com>
+ * Based on aspeed-socinfo.c
+ */
+
+#include <linux/dev_printk.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/sys_soc.h>
+
+static const struct {
+ const char *name;
+ const u32 id;
+} chip_id_table[] = {
+ /* VIA */
+ { "VT8420", 0x3300 },
+ { "VT8430", 0x3357 },
+ { "VT8500", 0x3400 },
+
+ /* WonderMedia */
+ { "WM8425", 0x3429 },
+ { "WM8435", 0x3437 },
+ { "WM8440", 0x3451 },
+ { "WM8505", 0x3426 },
+ { "WM8650", 0x3465 },
+ { "WM8750", 0x3445 },
+ { "WM8850", 0x3481 },
+ { "WM8880", 0x3498 },
+};
+
+static const char *sccid_to_name(u32 sccid)
+{
+ u32 id = sccid >> 16;
+ unsigned int i;
+
+ for (i = 0 ; i < ARRAY_SIZE(chip_id_table) ; ++i) {
+ if (chip_id_table[i].id == id)
+ return chip_id_table[i].name;
+ }
+
+ return "Unknown";
+}
+
+static int wmt_socinfo_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct soc_device_attribute *attrs;
+ struct soc_device *soc_dev;
+ char letter, digit;
+ void __iomem *reg;
+ u32 sccid;
+
+ reg = devm_of_iomap(&pdev->dev, np, 0, NULL);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ sccid = readl(reg);
+
+ attrs = devm_kzalloc(&pdev->dev, sizeof(*attrs), GFP_KERNEL);
+ if (!attrs)
+ return -ENOMEM;
+
+ /*
+ * Machine: VIA APC Rock
+ * Family: WM8850
+ * Revision: A2
+ * SoC ID: raw silicon revision id (34810103 in hexadecimal)
+ */
+
+ attrs->family = sccid_to_name(sccid);
+
+ letter = (sccid >> 8) & 0xf;
+ letter = (letter - 1) + 'A';
+ digit = sccid & 0xff;
+ digit = (digit - 1) + '0';
+ attrs->revision = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "%c%c", letter, digit);
+
+ attrs->soc_id = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%08x", sccid);
+
+ if (!attrs->revision || !attrs->soc_id)
+ return -ENOMEM;
+
+ soc_dev = soc_device_register(attrs);
+ if (IS_ERR(soc_dev))
+ return PTR_ERR(soc_dev);
+
+ dev_info(&pdev->dev,
+ "VIA/WonderMedia %s rev %s (%s)\n",
+ attrs->family,
+ attrs->revision,
+ attrs->soc_id);
+
+ platform_set_drvdata(pdev, soc_dev);
+ return 0;
+}
+
+static void wmt_socinfo_remove(struct platform_device *pdev)
+{
+ struct soc_device *soc_dev = platform_get_drvdata(pdev);
+
+ soc_device_unregister(soc_dev);
+}
+
+static const struct of_device_id wmt_socinfo_ids[] = {
+ { .compatible = "via,vt8500-scc-id" },
+ { /* Sentinel */ },
+};
+
+static struct platform_driver wmt_socinfo = {
+ .probe = wmt_socinfo_probe,
+ .remove = wmt_socinfo_remove,
+ .driver = {
+ .name = "wmt-socinfo",
+ .of_match_table = wmt_socinfo_ids,
+ },
+};
+module_platform_driver(wmt_socinfo);
+
+MODULE_AUTHOR("Alexey Charkov <alchark@gmail.com>");
+MODULE_DESCRIPTION("VIA/WonderMedia socinfo driver");
+MODULE_LICENSE("GPL");
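
Decoding the example SCC ID from the probe comment above, 0x34810103: the
top half selects the chip table entry and the low bytes yield the revision.
A standalone check of the arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t sccid = 0x34810103;                   /* example value */
        char letter = ((sccid >> 8) & 0xf) - 1 + 'A';  /* 0x01 -> 'A' */
        char digit  = (sccid & 0xff) - 1 + '0';        /* 0x03 -> '2' */

        printf("chip 0x%04x rev %c%c\n", sccid >> 16, letter, digit);
        /* -> chip 0x3481 rev A2, i.e. a WM8850 */
        return 0;
    }
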
diff --git a/drivers/spi/spi-qpic-snand.c b/drivers/spi/spi-qpic-snand.c
index fd129650434f..3f747fd61d19 100644
--- a/drivers/spi/spi-qpic-snand.c
+++ b/drivers/spi/spi-qpic-snand.c
@@ -1616,6 +1616,7 @@ static void qcom_spi_remove(struct platform_device *pdev)
static const struct qcom_nandc_props ipq9574_snandc_props = {
.dev_cmd_reg_start = 0x7000,
+ .bam_offset = 0x30000,
.supports_bam = true,
};
diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c
index e487231d25dc..fb39d9a19c69 100644
--- a/drivers/tee/amdtee/core.c
+++ b/drivers/tee/amdtee/core.c
@@ -3,19 +3,22 @@
* Copyright 2019 Advanced Micro Devices, Inc.
*/
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
#include <linux/io.h>
+#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/psp-tee.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/device.h>
#include <linux/tee_core.h>
#include <linux/types.h>
-#include <linux/mm.h>
#include <linux/uaccess.h>
-#include <linux/firmware.h>
+
#include "amdtee_private.h"
-#include <linux/psp-tee.h>
static struct amdtee_driver_data *drv_data;
static DEFINE_MUTEX(session_list_mutex);
@@ -458,7 +461,7 @@ static int __init amdtee_driver_init(void)
rc = psp_check_tee_status();
if (rc) {
- pr_err("amd-tee driver: tee not present\n");
+ pr_err("tee not present\n");
return rc;
}
@@ -494,7 +497,6 @@ static int __init amdtee_driver_init(void)
drv_data->amdtee = amdtee;
- pr_info("amd-tee driver initialization successful\n");
return 0;
err_device_unregister:
@@ -510,7 +512,7 @@ err_kfree_drv_data:
kfree(drv_data);
drv_data = NULL;
- pr_err("amd-tee driver initialization failed\n");
+ pr_err("initialization failed\n");
return rc;
}
module_init(amdtee_driver_init);
diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
index f0c3ac1103bb..26f8f7bbbe56 100644
--- a/drivers/tee/optee/smc_abi.c
+++ b/drivers/tee/optee/smc_abi.c
@@ -1551,8 +1551,7 @@ fw_load:
data_pa_high, data_pa_low, 0, 0, 0, &res);
if (!rc)
rc = res.a0;
- if (fw)
- release_firmware(fw);
+ release_firmware(fw);
kfree(data_buf);
if (!rc) {
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index d113679b1e2d..acc7998758ad 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -10,6 +10,7 @@
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/module.h>
+#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/tee_core.h>
#include <linux/uaccess.h>
@@ -19,7 +20,7 @@
#define TEE_NUM_DEVICES 32
-#define TEE_IOCTL_PARAM_SIZE(x) (sizeof(struct tee_param) * (x))
+#define TEE_IOCTL_PARAM_SIZE(x) (size_mul(sizeof(struct tee_param), (x)))
#define TEE_UUID_NS_NAME_SIZE 128
@@ -487,7 +488,7 @@ static int tee_ioctl_open_session(struct tee_context *ctx,
if (copy_from_user(&arg, uarg, sizeof(arg)))
return -EFAULT;
- if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
+ if (size_add(sizeof(arg), TEE_IOCTL_PARAM_SIZE(arg.num_params)) != buf.buf_len)
return -EINVAL;
if (arg.num_params) {
@@ -565,7 +566,7 @@ static int tee_ioctl_invoke(struct tee_context *ctx,
if (copy_from_user(&arg, uarg, sizeof(arg)))
return -EFAULT;
- if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
+ if (size_add(sizeof(arg), TEE_IOCTL_PARAM_SIZE(arg.num_params)) != buf.buf_len)
return -EINVAL;
if (arg.num_params) {
@@ -699,7 +700,7 @@ static int tee_ioctl_supp_recv(struct tee_context *ctx,
if (get_user(num_params, &uarg->num_params))
return -EFAULT;
- if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) != buf.buf_len)
+ if (size_add(sizeof(*uarg), TEE_IOCTL_PARAM_SIZE(num_params)) != buf.buf_len)
return -EINVAL;
params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
@@ -798,7 +799,7 @@ static int tee_ioctl_supp_send(struct tee_context *ctx,
get_user(num_params, &uarg->num_params))
return -EFAULT;
- if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) > buf.buf_len)
+ if (size_add(sizeof(*uarg), TEE_IOCTL_PARAM_SIZE(num_params)) > buf.buf_len)
return -EINVAL;
params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
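
The switch to size_mul()/size_add() above matters because those helpers
from <linux/overflow.h> saturate at SIZE_MAX instead of wrapping, so a huge
user-supplied num_params can no longer wrap the computed size back to
exactly buf_len and slip past the check. The shape of the fixed test
(sketch of the idea, not the driver code verbatim):

    if (size_add(sizeof(arg),
                 size_mul(sizeof(struct tee_param), num_params)) != buf.buf_len)
        return -EINVAL;   /* a saturated value can never equal buf_len */
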
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index 69c1df0f4ca5..aac67a4413ce 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -243,6 +243,9 @@ hv_uio_probe(struct hv_device *dev,
if (!ring_size)
ring_size = SZ_2M;
+ /* Adjust ring size if necessary to have it page aligned */
+ ring_size = VMBUS_RING_SIZE(ring_size);
+
pdata = devm_kzalloc(&dev->device, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
@@ -274,13 +277,13 @@ hv_uio_probe(struct hv_device *dev,
pdata->info.mem[INT_PAGE_MAP].name = "int_page";
pdata->info.mem[INT_PAGE_MAP].addr
= (uintptr_t)vmbus_connection.int_page;
- pdata->info.mem[INT_PAGE_MAP].size = PAGE_SIZE;
+ pdata->info.mem[INT_PAGE_MAP].size = HV_HYP_PAGE_SIZE;
pdata->info.mem[INT_PAGE_MAP].memtype = UIO_MEM_LOGICAL;
pdata->info.mem[MON_PAGE_MAP].name = "monitor_page";
pdata->info.mem[MON_PAGE_MAP].addr
= (uintptr_t)vmbus_connection.monitor_pages[1];
- pdata->info.mem[MON_PAGE_MAP].size = PAGE_SIZE;
+ pdata->info.mem[MON_PAGE_MAP].size = HV_HYP_PAGE_SIZE;
pdata->info.mem[MON_PAGE_MAP].memtype = UIO_MEM_LOGICAL;
if (channel->device_id == HV_NIC) {
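
Two page-size subtleties in the uio_hv_generic hunks above: the ring size
is rounded with VMBUS_RING_SIZE(), which (per its usual definition in
include/linux/hyperv.h) page-aligns the payload plus the ring-buffer
header, and the interrupt/monitor pages are sized HV_HYP_PAGE_SIZE because
they are hypervisor pages, fixed at 4 KiB even when the guest PAGE_SIZE is
larger (e.g. 16K/64K arm64 kernels). Sketch of the first point, assuming
that definition:

    ring_size = VMBUS_RING_SIZE(ring_size);
    /* e.g. a 2 MiB payload becomes 2 MiB plus one extra page for the
     * struct hv_ring_buffer header on a 4 KiB-page kernel */
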
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 740311c4fa24..c7a05f842745 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -144,8 +144,8 @@ static struct hid_descriptor hidg_desc = {
.bcdHID = cpu_to_le16(0x0101),
.bCountryCode = 0x00,
.bNumDescriptors = 0x1,
- /*.desc[0].bDescriptorType = DYNAMIC */
- /*.desc[0].wDescriptorLenght = DYNAMIC */
+ /*.rpt_desc.bDescriptorType = DYNAMIC */
+ /*.rpt_desc.wDescriptorLength = DYNAMIC */
};
/* Super-Speed Support */
@@ -939,8 +939,8 @@ static int hidg_setup(struct usb_function *f,
struct hid_descriptor hidg_desc_copy = hidg_desc;
VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: HID\n");
- hidg_desc_copy.desc[0].bDescriptorType = HID_DT_REPORT;
- hidg_desc_copy.desc[0].wDescriptorLength =
+ hidg_desc_copy.rpt_desc.bDescriptorType = HID_DT_REPORT;
+ hidg_desc_copy.rpt_desc.wDescriptorLength =
cpu_to_le16(hidg->report_desc_length);
length = min_t(unsigned short, length,
@@ -1210,8 +1210,8 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
	 * We can use the hidg_desc struct here but we should not rely on
	 * its content not changing after returning from this function.
*/
- hidg_desc.desc[0].bDescriptorType = HID_DT_REPORT;
- hidg_desc.desc[0].wDescriptorLength =
+ hidg_desc.rpt_desc.bDescriptorType = HID_DT_REPORT;
+ hidg_desc.rpt_desc.wDescriptorLength =
cpu_to_le16(hidg->report_desc_length);
hidg_hs_in_ep_desc.bEndpointAddress =
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 8adf6f954633..5ea884ef36af 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -7166,7 +7166,7 @@ static void tcpm_fw_get_timings(struct tcpm_port *port, struct fwnode_handle *fw
static int tcpm_fw_get_caps(struct tcpm_port *port, struct fwnode_handle *fwnode)
{
- struct fwnode_handle *capabilities, *child, *caps = NULL;
+ struct fwnode_handle *capabilities, *caps = NULL;
unsigned int nr_src_pdo, nr_snk_pdo;
const char *opmode_str;
u32 *src_pdo, *snk_pdo;
@@ -7232,9 +7232,7 @@ static int tcpm_fw_get_caps(struct tcpm_port *port, struct fwnode_handle *fwnode
if (!capabilities) {
port->pd_count = 1;
} else {
- fwnode_for_each_child_node(capabilities, child)
- port->pd_count++;
-
+ port->pd_count = fwnode_get_child_node_count(capabilities);
if (!port->pd_count) {
ret = -ENODATA;
goto put_capabilities;
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index f699e5827ccb..9dc93c5e480b 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -15,7 +15,6 @@
#include <linux/notifier.h>
#include <linux/ctype.h>
#include <linux/err.h>
-#include <linux/fb.h>
#include <linux/slab.h>
#ifdef CONFIG_PMAC_BACKLIGHT
@@ -57,10 +56,10 @@
* a hot-key to adjust backlight, the driver must notify the backlight
* core that brightness has changed using backlight_force_update().
*
- * The backlight driver core receives notifications from fbdev and
- * if the event is FB_EVENT_BLANK and if the value of blank, from the
- * FBIOBLANK ioctrl, results in a change in the backlight state the
- * update_status() operation is called.
+ * Display drivers can control the backlight device's status using
+ * backlight_notify_blank() and backlight_notify_blank_all(). If this
+ * results in a change in the backlight state, the functions call the
+ * update_status() operation.
*/
static struct list_head backlight_dev_list;
@@ -78,85 +77,40 @@ static const char *const backlight_scale_types[] = {
[BACKLIGHT_SCALE_NON_LINEAR] = "non-linear",
};
-#if defined(CONFIG_FB_CORE) || (defined(CONFIG_FB_CORE_MODULE) && \
- defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE))
-/*
- * fb_notifier_callback
- *
- * This callback gets called when something important happens inside a
- * framebuffer driver. The backlight core only cares about FB_BLANK_UNBLANK
- * which is reported to the driver using backlight_update_status()
- * as a state change.
- *
- * There may be several fbdev's connected to the backlight device,
- * in which case they are kept track of. A state change is only reported
- * if there is a change in backlight for the specified fbdev.
- */
-static int fb_notifier_callback(struct notifier_block *self,
- unsigned long event, void *data)
+void backlight_notify_blank(struct backlight_device *bd, struct device *display_dev,
+ bool fb_on, bool prev_fb_on)
{
- struct backlight_device *bd;
- struct fb_event *evdata = data;
- struct fb_info *info = evdata->info;
- struct backlight_device *fb_bd = fb_bl_device(info);
- int node = info->node;
- int fb_blank = 0;
-
- /* If we aren't interested in this event, skip it immediately ... */
- if (event != FB_EVENT_BLANK)
- return 0;
-
- bd = container_of(self, struct backlight_device, fb_notif);
- mutex_lock(&bd->ops_lock);
+ guard(mutex)(&bd->ops_lock);
if (!bd->ops)
- goto out;
- if (bd->ops->controls_device && !bd->ops->controls_device(bd, info->device))
- goto out;
- if (fb_bd && fb_bd != bd)
- goto out;
-
- fb_blank = *(int *)evdata->data;
- if (fb_blank == FB_BLANK_UNBLANK && !bd->fb_bl_on[node]) {
- bd->fb_bl_on[node] = true;
+ return;
+ if (bd->ops->controls_device && !bd->ops->controls_device(bd, display_dev))
+ return;
+
+ if (fb_on && (!prev_fb_on || !bd->use_count)) {
if (!bd->use_count++) {
bd->props.state &= ~BL_CORE_FBBLANK;
backlight_update_status(bd);
}
- } else if (fb_blank != FB_BLANK_UNBLANK && bd->fb_bl_on[node]) {
- bd->fb_bl_on[node] = false;
+ } else if (!fb_on && prev_fb_on && bd->use_count) {
if (!(--bd->use_count)) {
bd->props.state |= BL_CORE_FBBLANK;
backlight_update_status(bd);
}
}
-out:
- mutex_unlock(&bd->ops_lock);
- return 0;
}
+EXPORT_SYMBOL(backlight_notify_blank);
-static int backlight_register_fb(struct backlight_device *bd)
+void backlight_notify_blank_all(struct device *display_dev, bool fb_on, bool prev_fb_on)
{
- memset(&bd->fb_notif, 0, sizeof(bd->fb_notif));
- bd->fb_notif.notifier_call = fb_notifier_callback;
+ struct backlight_device *bd;
- return fb_register_client(&bd->fb_notif);
-}
+ guard(mutex)(&backlight_dev_list_mutex);
-static void backlight_unregister_fb(struct backlight_device *bd)
-{
- fb_unregister_client(&bd->fb_notif);
-}
-#else
-static inline int backlight_register_fb(struct backlight_device *bd)
-{
- return 0;
+ list_for_each_entry(bd, &backlight_dev_list, entry)
+ backlight_notify_blank(bd, display_dev, fb_on, prev_fb_on);
}
-
-static inline void backlight_unregister_fb(struct backlight_device *bd)
-{
-}
-#endif /* CONFIG_FB_CORE */
+EXPORT_SYMBOL(backlight_notify_blank_all);
static void backlight_generate_event(struct backlight_device *bd,
enum backlight_update_reason reason)
@@ -447,12 +401,6 @@ struct backlight_device *backlight_device_register(const char *name,
return ERR_PTR(rc);
}
- rc = backlight_register_fb(new_bd);
- if (rc) {
- device_unregister(&new_bd->dev);
- return ERR_PTR(rc);
- }
-
new_bd->ops = ops;
#ifdef CONFIG_PMAC_BACKLIGHT
@@ -539,7 +487,6 @@ void backlight_device_unregister(struct backlight_device *bd)
bd->ops = NULL;
mutex_unlock(&bd->ops_lock);
- backlight_unregister_fb(bd);
device_unregister(&bd->dev);
}
EXPORT_SYMBOL(backlight_device_unregister);
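
With the fbdev notifier gone, display drivers report blanking directly and
the core still reference-counts displays through use_count, so the
backlight only turns off once the last lit display blanks. A sketch of the
caller side (hypothetical display driver; the real call sites are
elsewhere in this series):

    static void my_display_set_blank(struct device *display_dev,
                                     bool fb_on, bool prev_fb_on)
    {
        /* fb_on/prev_fb_on let the core keep use_count balanced */
        backlight_notify_blank_all(display_dev, fb_on, prev_fb_on);
    }
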
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
index 3267acf8dc5b..affe5c52471a 100644
--- a/drivers/video/backlight/lcd.c
+++ b/drivers/video/backlight/lcd.c
@@ -15,86 +15,59 @@
#include <linux/notifier.h>
#include <linux/ctype.h>
#include <linux/err.h>
-#include <linux/fb.h>
#include <linux/slab.h>
-#if defined(CONFIG_FB) || (defined(CONFIG_FB_MODULE) && \
- defined(CONFIG_LCD_CLASS_DEVICE_MODULE))
-static int to_lcd_power(int fb_blank)
-{
- switch (fb_blank) {
- case FB_BLANK_UNBLANK:
- return LCD_POWER_ON;
- /* deprecated; TODO: should become 'off' */
- case FB_BLANK_NORMAL:
- return LCD_POWER_REDUCED;
- case FB_BLANK_VSYNC_SUSPEND:
- return LCD_POWER_REDUCED_VSYNC_SUSPEND;
- /* 'off' */
- case FB_BLANK_HSYNC_SUSPEND:
- case FB_BLANK_POWERDOWN:
- default:
- return LCD_POWER_OFF;
- }
-}
+static DEFINE_MUTEX(lcd_dev_list_mutex);
+static LIST_HEAD(lcd_dev_list);
-/* This callback gets called when something important happens inside a
- * framebuffer driver. We're looking if that important event is blanking,
- * and if it is, we're switching lcd power as well ...
- */
-static int fb_notifier_callback(struct notifier_block *self,
- unsigned long event, void *data)
+static void lcd_notify_blank(struct lcd_device *ld, struct device *display_dev,
+ int power)
{
- struct lcd_device *ld = container_of(self, struct lcd_device, fb_notif);
- struct fb_event *evdata = data;
- struct fb_info *info = evdata->info;
- struct lcd_device *fb_lcd = fb_lcd_device(info);
-
guard(mutex)(&ld->ops_lock);
- if (!ld->ops)
- return 0;
- if (ld->ops->controls_device && !ld->ops->controls_device(ld, info->device))
- return 0;
- if (fb_lcd && fb_lcd != ld)
- return 0;
+ if (!ld->ops || !ld->ops->set_power)
+ return;
+ if (ld->ops->controls_device && !ld->ops->controls_device(ld, display_dev))
+ return;
- if (event == FB_EVENT_BLANK) {
- int power = to_lcd_power(*(int *)evdata->data);
+ ld->ops->set_power(ld, power);
+}
- if (ld->ops->set_power)
- ld->ops->set_power(ld, power);
- } else {
- const struct fb_videomode *videomode = evdata->data;
+void lcd_notify_blank_all(struct device *display_dev, int power)
+{
+ struct lcd_device *ld;
- if (ld->ops->set_mode)
- ld->ops->set_mode(ld, videomode->xres, videomode->yres);
- }
+ guard(mutex)(&lcd_dev_list_mutex);
- return 0;
+ list_for_each_entry(ld, &lcd_dev_list, entry)
+ lcd_notify_blank(ld, display_dev, power);
}
+EXPORT_SYMBOL(lcd_notify_blank_all);
-static int lcd_register_fb(struct lcd_device *ld)
+static void lcd_notify_mode_change(struct lcd_device *ld, struct device *display_dev,
+ unsigned int width, unsigned int height)
{
- memset(&ld->fb_notif, 0, sizeof(ld->fb_notif));
- ld->fb_notif.notifier_call = fb_notifier_callback;
- return fb_register_client(&ld->fb_notif);
-}
+ guard(mutex)(&ld->ops_lock);
-static void lcd_unregister_fb(struct lcd_device *ld)
-{
- fb_unregister_client(&ld->fb_notif);
-}
-#else
-static int lcd_register_fb(struct lcd_device *ld)
-{
- return 0;
+ if (!ld->ops || !ld->ops->set_mode)
+ return;
+ if (ld->ops->controls_device && !ld->ops->controls_device(ld, display_dev))
+ return;
+
+ ld->ops->set_mode(ld, width, height);
}
-static inline void lcd_unregister_fb(struct lcd_device *ld)
+void lcd_notify_mode_change_all(struct device *display_dev,
+ unsigned int width, unsigned int height)
{
+ struct lcd_device *ld;
+
+ guard(mutex)(&lcd_dev_list_mutex);
+
+ list_for_each_entry(ld, &lcd_dev_list, entry)
+ lcd_notify_mode_change(ld, display_dev, width, height);
}
-#endif /* CONFIG_FB */
+EXPORT_SYMBOL(lcd_notify_mode_change_all);
static ssize_t lcd_power_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -245,11 +218,8 @@ struct lcd_device *lcd_device_register(const char *name, struct device *parent,
return ERR_PTR(rc);
}
- rc = lcd_register_fb(new_ld);
- if (rc) {
- device_unregister(&new_ld->dev);
- return ERR_PTR(rc);
- }
+ guard(mutex)(&lcd_dev_list_mutex);
+ list_add(&new_ld->entry, &lcd_dev_list);
return new_ld;
}
@@ -266,10 +236,12 @@ void lcd_device_unregister(struct lcd_device *ld)
if (!ld)
return;
+ guard(mutex)(&lcd_dev_list_mutex);
+ list_del(&ld->entry);
+
mutex_lock(&ld->ops_lock);
ld->ops = NULL;
mutex_unlock(&ld->ops_lock);
- lcd_unregister_fb(ld);
device_unregister(&ld->dev);
}
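
The notifier-based coupling between fbdev and the LCD class is thus replaced by a plain device list: lcd_device_register() adds the device to lcd_dev_list and the fbdev core broadcasts events through lcd_notify_blank_all() and lcd_notify_mode_change_all(). A minimal sketch of a panel driver participating in the broadcast; my_panel and its helpers are hypothetical, only the lcd_ops hooks come from the API above:

	/* Hypothetical panel driver wired to the new list-based LCD API. */
	struct my_panel {
		struct device *display_dev;	/* fbdev device we care about */
	};

	static int my_panel_set_power(struct lcd_device *ld, int power)
	{
		struct my_panel *panel = lcd_get_data(ld);

		/* Called via lcd_notify_blank_all() on fb_blank(). */
		return my_panel_hw_power(panel, power == LCD_POWER_ON);
	}

	static bool my_panel_controls_device(struct lcd_device *ld,
					     struct device *display_dev)
	{
		struct my_panel *panel = lcd_get_data(ld);

		/* Filter out blank events from unrelated displays. */
		return panel->display_dev == display_dev;
	}

	static const struct lcd_ops my_panel_lcd_ops = {
		.set_power       = my_panel_set_power,
		.controls_device = my_panel_controls_device,
	};
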
diff --git a/drivers/video/backlight/qcom-wled.c b/drivers/video/backlight/qcom-wled.c
index 9afe701b2a1b..a63bb42c8f8b 100644
--- a/drivers/video/backlight/qcom-wled.c
+++ b/drivers/video/backlight/qcom-wled.c
@@ -1406,9 +1406,11 @@ static int wled_configure(struct wled *wled)
wled->ctrl_addr = be32_to_cpu(*prop_addr);
rc = of_property_read_string(dev->of_node, "label", &wled->name);
- if (rc)
+ if (rc) {
wled->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFn", dev->of_node);
-
+ if (!wled->name)
+ return -ENOMEM;
+ }
switch (wled->version) {
case 3:
u32_opts = wled3_opts;
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 37bd18730fe0..f9cdbf8c53e3 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -1168,7 +1168,7 @@ static bool vgacon_scroll(struct vc_data *c, unsigned int t, unsigned int b,
c->vc_screenbuf_size - delta);
c->vc_origin = vga_vram_end - c->vc_screenbuf_size;
vga_rolled_over = 0;
- } else
+ } else if (oldo - delta >= (unsigned long)c->vc_screenbuf)
c->vc_origin -= delta;
c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size;
scr_memsetw((u16 *) (c->vc_origin), c->vc_video_erase_char,
diff --git a/drivers/video/fbdev/arkfb.c b/drivers/video/fbdev/arkfb.c
index 082501feceb9..ec084323115f 100644
--- a/drivers/video/fbdev/arkfb.c
+++ b/drivers/video/fbdev/arkfb.c
@@ -431,9 +431,10 @@ static struct dac_ops ics5342_ops = {
static struct dac_info * ics5342_init(dac_read_regs_t drr, dac_write_regs_t dwr, void *data)
{
- struct dac_info *info = kzalloc(sizeof(struct ics5342_info), GFP_KERNEL);
+ struct ics5342_info *ics_info = kzalloc(sizeof(struct ics5342_info), GFP_KERNEL);
+ struct dac_info *info = &ics_info->dac;
- if (! info)
+ if (!ics_info)
return NULL;
info->dacops = &ics5342_ops;
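
The underlying fix is an instance of the embed-and-contain pattern: allocate the outer object, hand out a pointer to the embedded member, and recover the container when needed. A brief sketch under the assumption, visible in the patch, that struct ics5342_info embeds its struct dac_info as a field named dac:

	/* Recover the private container from the embedded dac_info. */
	static inline struct ics5342_info *to_ics5342(struct dac_info *info)
	{
		return container_of(info, struct ics5342_info, dac);
	}

Allocating sizeof(struct dac_info) while using the object as an ics5342_info, as the old code did, under-allocates and corrupts adjacent memory.
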
diff --git a/drivers/video/fbdev/carminefb.c b/drivers/video/fbdev/carminefb.c
index e56065cdba97..2bdd67595891 100644
--- a/drivers/video/fbdev/carminefb.c
+++ b/drivers/video/fbdev/carminefb.c
@@ -649,13 +649,13 @@ static int carminefb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
* is required for that largest resolution to avoid remaps at run
* time
*/
- if (carminefb_fix.smem_len > CARMINE_TOTAL_DIPLAY_MEM)
- carminefb_fix.smem_len = CARMINE_TOTAL_DIPLAY_MEM;
+ if (carminefb_fix.smem_len > CARMINE_TOTAL_DISPLAY_MEM)
+ carminefb_fix.smem_len = CARMINE_TOTAL_DISPLAY_MEM;
- else if (carminefb_fix.smem_len < CARMINE_TOTAL_DIPLAY_MEM) {
+ else if (carminefb_fix.smem_len < CARMINE_TOTAL_DISPLAY_MEM) {
printk(KERN_ERR "carminefb: Memory bar is only %d bytes, %d "
"are required.", carminefb_fix.smem_len,
- CARMINE_TOTAL_DIPLAY_MEM);
+ CARMINE_TOTAL_DISPLAY_MEM);
goto err_unmap_vregs;
}
diff --git a/drivers/video/fbdev/carminefb.h b/drivers/video/fbdev/carminefb.h
index 297688eba469..c9825481d96b 100644
--- a/drivers/video/fbdev/carminefb.h
+++ b/drivers/video/fbdev/carminefb.h
@@ -7,7 +7,7 @@
#define MAX_DISPLAY 2
#define CARMINE_DISPLAY_MEM (800 * 600 * 4)
-#define CARMINE_TOTAL_DIPLAY_MEM (CARMINE_DISPLAY_MEM * MAX_DISPLAY)
+#define CARMINE_TOTAL_DISPLAY_MEM (CARMINE_DISPLAY_MEM * MAX_DISPLAY)
#define CARMINE_USE_DISPLAY0 (1 << 0)
#define CARMINE_USE_DISPLAY1 (1 << 1)
diff --git a/drivers/video/fbdev/core/fb_backlight.c b/drivers/video/fbdev/core/fb_backlight.c
index 6fdaa9f81be9..dbed9696f4c5 100644
--- a/drivers/video/fbdev/core/fb_backlight.c
+++ b/drivers/video/fbdev/core/fb_backlight.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/backlight.h>
#include <linux/export.h>
#include <linux/fb.h>
#include <linux/mutex.h>
@@ -36,4 +37,15 @@ struct backlight_device *fb_bl_device(struct fb_info *info)
return info->bl_dev;
}
EXPORT_SYMBOL(fb_bl_device);
+
+void fb_bl_notify_blank(struct fb_info *info, int old_blank)
+{
+ bool on = info->blank == FB_BLANK_UNBLANK;
+ bool prev_on = old_blank == FB_BLANK_UNBLANK;
+
+ if (info->bl_dev)
+ backlight_notify_blank(info->bl_dev, info->device, on, prev_on);
+ else
+ backlight_notify_blank_all(info->device, on, prev_on);
+}
#endif
diff --git a/drivers/video/fbdev/core/fb_info.c b/drivers/video/fbdev/core/fb_info.c
index 4847ebe50d7d..52f9bd2c5417 100644
--- a/drivers/video/fbdev/core/fb_info.c
+++ b/drivers/video/fbdev/core/fb_info.c
@@ -42,6 +42,7 @@ struct fb_info *framebuffer_alloc(size_t size, struct device *dev)
info->device = dev;
info->fbcon_rotate_hint = -1;
+ info->blank = FB_BLANK_UNBLANK;
#if IS_ENABLED(CONFIG_FB_BACKLIGHT)
mutex_init(&info->bl_curve_mutex);
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index ac3c99ed92d1..2df48037688d 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -117,9 +117,14 @@ static signed char con2fb_map_boot[MAX_NR_CONSOLES];
static struct fb_info *fbcon_info_from_console(int console)
{
+ signed char fb;
WARN_CONSOLE_UNLOCKED();
- return fbcon_registered_fb[con2fb_map[console]];
+ fb = con2fb_map[console];
+ if (fb < 0 || fb >= ARRAY_SIZE(fbcon_registered_fb))
+ return NULL;
+
+ return fbcon_registered_fb[fb];
}
static int logo_lines;
diff --git a/drivers/video/fbdev/core/fbcvt.c b/drivers/video/fbdev/core/fbcvt.c
index 64843464c661..cd3821bd82e5 100644
--- a/drivers/video/fbdev/core/fbcvt.c
+++ b/drivers/video/fbdev/core/fbcvt.c
@@ -312,7 +312,7 @@ int fb_find_mode_cvt(struct fb_videomode *mode, int margins, int rb)
cvt.f_refresh = cvt.refresh;
cvt.interlace = 1;
- if (!cvt.xres || !cvt.yres || !cvt.refresh) {
+ if (!cvt.xres || !cvt.yres || !cvt.refresh || cvt.f_refresh > INT_MAX) {
printk(KERN_INFO "fbcvt: Invalid input parameters\n");
return 1;
}
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 3c568cff2913..dfcf5e4d1d4c 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -15,6 +15,8 @@
#include <linux/export.h>
#include <linux/fb.h>
#include <linux/fbcon.h>
+#include <linux/lcd.h>
+#include <linux/leds.h>
#include <video/nomodeset.h>
@@ -220,6 +222,12 @@ static int fb_check_caps(struct fb_info *info, struct fb_var_screeninfo *var,
return err;
}
+static void fb_lcd_notify_mode_change(struct fb_info *info,
+ struct fb_videomode *mode)
+{
+ lcd_notify_mode_change_all(info->device, mode->xres, mode->yres);
+}
+
int
fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
{
@@ -227,7 +235,6 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
u32 activate;
struct fb_var_screeninfo old_var;
struct fb_videomode mode;
- struct fb_event event;
u32 unused;
if (var->activate & FB_ACTIVATE_INV_MODE) {
@@ -328,35 +335,76 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
!list_empty(&info->modelist))
ret = fb_add_videomode(&mode, &info->modelist);
- if (ret)
+ if (ret) {
+ info->var = old_var;
return ret;
+ }
- event.info = info;
- event.data = &mode;
- fb_notifier_call_chain(FB_EVENT_MODE_CHANGE, &event);
+ fb_lcd_notify_mode_change(info, &mode);
return 0;
}
EXPORT_SYMBOL(fb_set_var);
-int
-fb_blank(struct fb_info *info, int blank)
+static void fb_lcd_notify_blank(struct fb_info *info)
{
- struct fb_event event;
- int ret = -EINVAL;
+ int power;
+
+ switch (info->blank) {
+ case FB_BLANK_UNBLANK:
+ power = LCD_POWER_ON;
+ break;
+ /* deprecated; TODO: should become 'off' */
+ case FB_BLANK_NORMAL:
+ power = LCD_POWER_REDUCED;
+ break;
+ case FB_BLANK_VSYNC_SUSPEND:
+ power = LCD_POWER_REDUCED_VSYNC_SUSPEND;
+ break;
+ /* 'off' */
+ case FB_BLANK_HSYNC_SUSPEND:
+ case FB_BLANK_POWERDOWN:
+ default:
+ power = LCD_POWER_OFF;
+ break;
+ }
+
+ lcd_notify_blank_all(info->device, power);
+}
+
+static void fb_ledtrig_backlight_notify_blank(struct fb_info *info)
+{
+ if (info->blank == FB_BLANK_UNBLANK)
+ ledtrig_backlight_blank(false);
+ else
+ ledtrig_backlight_blank(true);
+}
+
+int fb_blank(struct fb_info *info, int blank)
+{
+ int old_blank = info->blank;
+ int ret;
+
+ if (!info->fbops->fb_blank)
+ return -EINVAL;
if (blank > FB_BLANK_POWERDOWN)
blank = FB_BLANK_POWERDOWN;
- event.info = info;
- event.data = &blank;
+ info->blank = blank;
- if (info->fbops->fb_blank)
- ret = info->fbops->fb_blank(blank, info);
+ ret = info->fbops->fb_blank(blank, info);
+ if (ret)
+ goto err;
- if (!ret)
- fb_notifier_call_chain(FB_EVENT_BLANK, &event);
+ fb_bl_notify_blank(info, old_blank);
+ fb_lcd_notify_blank(info);
+ fb_ledtrig_backlight_notify_blank(info);
+ return 0;
+
+err:
+ info->blank = old_blank;
return ret;
}
EXPORT_SYMBOL(fb_blank);
@@ -388,7 +436,7 @@ static int fb_check_foreignness(struct fb_info *fi)
static int do_register_framebuffer(struct fb_info *fb_info)
{
- int i;
+ int i, err = 0;
struct fb_videomode mode;
if (fb_check_foreignness(fb_info))
@@ -397,15 +445,31 @@ static int do_register_framebuffer(struct fb_info *fb_info)
if (num_registered_fb == FB_MAX)
return -ENXIO;
- num_registered_fb++;
for (i = 0 ; i < FB_MAX; i++)
if (!registered_fb[i])
break;
+
+ if (!fb_info->modelist.prev || !fb_info->modelist.next)
+ INIT_LIST_HEAD(&fb_info->modelist);
+
+ fb_var_to_videomode(&mode, &fb_info->var);
+ err = fb_add_videomode(&mode, &fb_info->modelist);
+ if (err < 0)
+ return err;
+
fb_info->node = i;
refcount_set(&fb_info->count, 1);
mutex_init(&fb_info->lock);
mutex_init(&fb_info->mm_lock);
+ /*
+ * With an fb_blank callback present, we assume that the
+ * display is blank, so that fb_blank() enables it on the
+ * first modeset.
+ */
+ if (fb_info->fbops->fb_blank)
+ fb_info->blank = FB_BLANK_POWERDOWN;
+
fb_device_create(fb_info);
if (fb_info->pixmap.addr == NULL) {
@@ -426,16 +490,12 @@ static int do_register_framebuffer(struct fb_info *fb_info)
if (bitmap_empty(fb_info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT))
bitmap_fill(fb_info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT);
- if (!fb_info->modelist.prev || !fb_info->modelist.next)
- INIT_LIST_HEAD(&fb_info->modelist);
-
if (fb_info->skip_vt_switch)
pm_vt_switch_required(fb_info->device, false);
else
pm_vt_switch_required(fb_info->device, true);
- fb_var_to_videomode(&mode, &fb_info->var);
- fb_add_videomode(&mode, &fb_info->modelist);
+ num_registered_fb++;
registered_fb[i] = fb_info;
#ifdef CONFIG_GUMSTIX_AM200EPD
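
With info->blank as the canonical state, fb_blank() stores the requested level up front, rolls it back if the driver rejects it, and on success fans the change out to the backlight, LCD and LED-trigger listeners. A hedged sketch of the resulting contract for callers, assuming a registered fb_info whose driver implements fb_blank:

	int err = fb_blank(info, FB_BLANK_POWERDOWN);
	if (err)
		return err;	/* info->blank was restored to its old value */

	/* On success, info->blank reads back as FB_BLANK_POWERDOWN and the
	 * blank sysfs attribute now reports the same value. */
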
diff --git a/drivers/video/fbdev/core/fbsysfs.c b/drivers/video/fbdev/core/fbsysfs.c
index 06d75c767579..b8344c40073b 100644
--- a/drivers/video/fbdev/core/fbsysfs.c
+++ b/drivers/video/fbdev/core/fbsysfs.c
@@ -242,11 +242,11 @@ static ssize_t store_blank(struct device *device,
return count;
}
-static ssize_t show_blank(struct device *device,
- struct device_attribute *attr, char *buf)
+static ssize_t show_blank(struct device *device, struct device_attribute *attr, char *buf)
{
-// struct fb_info *fb_info = dev_get_drvdata(device);
- return 0;
+ struct fb_info *fb_info = dev_get_drvdata(device);
+
+ return sysfs_emit(buf, "%d\n", fb_info->blank);
}
static ssize_t store_console(struct device *device,
diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
index 8900f181f195..cfaf9454014d 100644
--- a/drivers/video/fbdev/nvidia/nvidia.c
+++ b/drivers/video/fbdev/nvidia/nvidia.c
@@ -1484,7 +1484,7 @@ static int nvidiafb_setup(char *options)
flatpanel = 1;
} else if (!strncmp(this_opt, "hwcur", 5)) {
hwcur = 1;
- } else if (!strncmp(this_opt, "noaccel", 6)) {
+ } else if (!strncmp(this_opt, "noaccel", 7)) {
noaccel = 1;
} else if (!strncmp(this_opt, "noscale", 7)) {
noscale = 1;
diff --git a/drivers/video/fbdev/via/via-gpio.c b/drivers/video/fbdev/via/via-gpio.c
index 9577c2cd52c7..27226a8f3f42 100644
--- a/drivers/video/fbdev/via/via-gpio.c
+++ b/drivers/video/fbdev/via/via-gpio.c
@@ -81,8 +81,7 @@ struct viafb_gpio_cfg {
/*
* GPIO access functions
*/
-static void via_gpio_set(struct gpio_chip *chip, unsigned int nr,
- int value)
+static int via_gpio_set(struct gpio_chip *chip, unsigned int nr, int value)
{
struct viafb_gpio_cfg *cfg = gpiochip_get_data(chip);
u8 reg;
@@ -99,13 +98,14 @@ static void via_gpio_set(struct gpio_chip *chip, unsigned int nr,
reg &= ~(0x10 << gpio->vg_mask_shift);
via_write_reg(VIASR, gpio->vg_port_index, reg);
spin_unlock_irqrestore(&cfg->vdev->reg_lock, flags);
+
+ return 0;
}
static int via_gpio_dir_out(struct gpio_chip *chip, unsigned int nr,
int value)
{
- via_gpio_set(chip, nr, value);
- return 0;
+ return via_gpio_set(chip, nr, value);
}
/*
@@ -146,7 +146,7 @@ static struct viafb_gpio_cfg viafb_gpio_config = {
.label = "VIAFB onboard GPIO",
.owner = THIS_MODULE,
.direction_output = via_gpio_dir_out,
- .set = via_gpio_set,
+ .set_rv = via_gpio_set,
.direction_input = via_gpio_dir_input,
.get = via_gpio_get,
.base = -1,
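
This follows the ongoing GPIO conversion from the void .set callback to the int-returning .set_rv, which lets setters propagate errors; via_gpio_set() itself cannot fail, so it returns 0. The shape of the convention, with my_gpio_set as a hypothetical driver callback:

	static int my_gpio_set(struct gpio_chip *chip, unsigned int offset,
			       int value)
	{
		/* Return 0 on success or a negative errno on failure. */
		return 0;
	}

	static struct gpio_chip my_chip = {
		.set_rv = my_gpio_set,	/* replaces the legacy void .set */
	};
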
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 0d8d37f712e8..0c25b2ed44eb 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -804,6 +804,15 @@ config IMX7ULP_WDT
To compile this driver as a module, choose M here: the
module will be called imx7ulp_wdt.
+config S32G_WDT
+ tristate "S32G Watchdog"
+ depends on ARCH_S32 || COMPILE_TEST
+ select WATCHDOG_CORE
+ help
+ This is the driver for the hardware watchdog on the NXP
+ S32G platforms. If you wish to have watchdog support
+ enabled, say Y, otherwise say N.
+
config DB500_WATCHDOG
tristate "ST-Ericsson DB800 watchdog"
depends on MFD_DB8500_PRCMU
@@ -1001,7 +1010,7 @@ config STM32_WATCHDOG
tristate "STM32 Independent WatchDoG (IWDG) support"
depends on ARCH_STM32 || COMPILE_TEST
select WATCHDOG_CORE
- default y
+ default ARCH_STM32
help
Say Y here to include support for the watchdog timer
in stm32 SoCs.
@@ -1363,6 +1372,17 @@ config INTEL_MID_WATCHDOG
To compile this driver as a module, choose M here.
+config INTEL_OC_WATCHDOG
+ tristate "Intel OC Watchdog"
+ depends on (X86 || COMPILE_TEST) && ACPI && HAS_IOPORT
+ select WATCHDOG_CORE
+ help
+	  Hardware driver for the Intel Over-Clocking watchdog present in
+	  Platform Controller Hub (PCH) chipsets.
+
+ To compile this driver as a module, choose M here: the
+ module will be called intel_oc_wdt.
+
config ITCO_WDT
tristate "Intel TCO Timer/Watchdog"
depends on X86 && PCI
@@ -1869,7 +1889,7 @@ config OCTEON_WDT
config MARVELL_GTI_WDT
tristate "Marvell GTI Watchdog driver"
depends on ARCH_THUNDER || (COMPILE_TEST && 64BIT)
- default y
+ default ARCH_THUNDER
select WATCHDOG_CORE
help
Marvell GTI hardware supports watchdog timer. First timeout
@@ -2035,7 +2055,7 @@ config 8xxx_WDT
config PIKA_WDT
tristate "PIKA FPGA Watchdog"
depends on WARP || (PPC64 && COMPILE_TEST)
- default y
+ default WARP
help
This enables the watchdog in the PIKA FPGA. Currently used on
the Warp platform.
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index c9482904bf87..bbd4d62d2cc3 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -69,6 +69,7 @@ obj-$(CONFIG_TS72XX_WATCHDOG) += ts72xx_wdt.o
obj-$(CONFIG_IMX2_WDT) += imx2_wdt.o
obj-$(CONFIG_IMX_SC_WDT) += imx_sc_wdt.o
obj-$(CONFIG_IMX7ULP_WDT) += imx7ulp_wdt.o
+obj-$(CONFIG_S32G_WDT) += s32g_wdt.o
obj-$(CONFIG_DB500_WATCHDOG) += db8500_wdt.o
obj-$(CONFIG_RETU_WATCHDOG) += retu_wdt.o
obj-$(CONFIG_BCM2835_WDT) += bcm2835_wdt.o
@@ -150,6 +151,7 @@ obj-$(CONFIG_W83977F_WDT) += w83977f_wdt.o
obj-$(CONFIG_MACHZ_WDT) += machzwd.o
obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o
obj-$(CONFIG_INTEL_MID_WATCHDOG) += intel-mid_wdt.o
+obj-$(CONFIG_INTEL_OC_WATCHDOG) += intel_oc_wdt.o
obj-$(CONFIG_INTEL_MEI_WDT) += mei_wdt.o
obj-$(CONFIG_NI903X_WDT) += ni903x_wdt.o
obj-$(CONFIG_NIC7018_WDT) += nic7018_wdt.o
diff --git a/drivers/watchdog/apple_wdt.c b/drivers/watchdog/apple_wdt.c
index 95d9e37df41c..66a158f67a71 100644
--- a/drivers/watchdog/apple_wdt.c
+++ b/drivers/watchdog/apple_wdt.c
@@ -95,9 +95,12 @@ static int apple_wdt_ping(struct watchdog_device *wdd)
static int apple_wdt_set_timeout(struct watchdog_device *wdd, unsigned int s)
{
struct apple_wdt *wdt = to_apple_wdt(wdd);
+ u32 actual;
writel_relaxed(0, wdt->regs + APPLE_WDT_WD1_CUR_TIME);
- writel_relaxed(wdt->clk_rate * s, wdt->regs + APPLE_WDT_WD1_BITE_TIME);
+
+ actual = min(s, wdd->max_hw_heartbeat_ms / 1000);
+ writel_relaxed(wdt->clk_rate * actual, wdt->regs + APPLE_WDT_WD1_BITE_TIME);
wdd->timeout = s;
@@ -177,7 +180,7 @@ static int apple_wdt_probe(struct platform_device *pdev)
wdt->wdd.ops = &apple_wdt_ops;
wdt->wdd.info = &apple_wdt_info;
- wdt->wdd.max_timeout = U32_MAX / wdt->clk_rate;
+ wdt->wdd.max_hw_heartbeat_ms = U32_MAX / wdt->clk_rate * 1000;
wdt->wdd.timeout = APPLE_WDT_TIMEOUT_DEFAULT;
wdt_ctrl = readl_relaxed(wdt->regs + APPLE_WDT_WD1_CTRL);
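
Switching from max_timeout to max_hw_heartbeat_ms changes who handles long timeouts: the watchdog core accepts user timeouts above the hardware limit and keeps the hardware fed itself, while set_timeout clamps what it actually programs. A worked example; the 24 MHz clock rate is assumed for illustration, not taken from the patch:

	u32 clk_rate = 24000000;		/* assumed */
	u32 hw_max_s = U32_MAX / clk_rate;	/* ~178 s fits the register */
	/* wdd->max_hw_heartbeat_ms = hw_max_s * 1000;
	 * A requested timeout of 600 s is now accepted: the hardware is
	 * programmed with min(600, 178) = 178 s and re-armed by the core. */
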
diff --git a/drivers/watchdog/arm_smc_wdt.c b/drivers/watchdog/arm_smc_wdt.c
index 8f3d0c3a005f..bbba23ace7b8 100644
--- a/drivers/watchdog/arm_smc_wdt.c
+++ b/drivers/watchdog/arm_smc_wdt.c
@@ -46,6 +46,8 @@ static int smcwd_call(struct watchdog_device *wdd, enum smcwd_call call,
return -ENODEV;
if (res->a0 == PSCI_RET_INVALID_PARAMS)
return -EINVAL;
+ if (res->a0 == PSCI_RET_DISABLED)
+ return -ENODATA;
if (res->a0 != PSCI_RET_SUCCESS)
return -EIO;
return 0;
@@ -131,10 +133,19 @@ static int smcwd_probe(struct platform_device *pdev)
wdd->info = &smcwd_info;
/* get_timeleft is optional */
- if (smcwd_call(wdd, SMCWD_GET_TIMELEFT, 0, NULL))
- wdd->ops = &smcwd_ops;
- else
+ err = smcwd_call(wdd, SMCWD_GET_TIMELEFT, 0, NULL);
+ switch (err) {
+ case 0:
+ set_bit(WDOG_HW_RUNNING, &wdd->status);
+ fallthrough;
+ case -ENODATA:
wdd->ops = &smcwd_timeleft_ops;
+ break;
+ default:
+ wdd->ops = &smcwd_ops;
+ break;
+ }
+
wdd->timeout = res.a2;
wdd->max_timeout = res.a2;
wdd->min_timeout = res.a1;
diff --git a/drivers/watchdog/cros_ec_wdt.c b/drivers/watchdog/cros_ec_wdt.c
index 716c23f4388c..9ffe7f505645 100644
--- a/drivers/watchdog/cros_ec_wdt.c
+++ b/drivers/watchdog/cros_ec_wdt.c
@@ -25,26 +25,22 @@ static int cros_ec_wdt_send_cmd(struct cros_ec_device *cros_ec,
union cros_ec_wdt_data *arg)
{
int ret;
- struct {
- struct cros_ec_command msg;
- union cros_ec_wdt_data data;
- } __packed buf = {
- .msg = {
- .version = 0,
- .command = EC_CMD_HANG_DETECT,
- .insize = (arg->req.command == EC_HANG_DETECT_CMD_GET_STATUS) ?
- sizeof(struct ec_response_hang_detect) :
- 0,
- .outsize = sizeof(struct ec_params_hang_detect),
- },
- .data.req = arg->req
- };
-
- ret = cros_ec_cmd_xfer_status(cros_ec, &buf.msg);
+ DEFINE_RAW_FLEX(struct cros_ec_command, msg, data,
+ sizeof(union cros_ec_wdt_data));
+
+ msg->version = 0;
+ msg->command = EC_CMD_HANG_DETECT;
+ msg->insize = (arg->req.command == EC_HANG_DETECT_CMD_GET_STATUS) ?
+ sizeof(struct ec_response_hang_detect) :
+ 0;
+ msg->outsize = sizeof(struct ec_params_hang_detect);
+ *(struct ec_params_hang_detect *)msg->data = arg->req;
+
+ ret = cros_ec_cmd_xfer_status(cros_ec, msg);
if (ret < 0)
return ret;
- arg->resp = buf.data.resp;
+ arg->resp = *(struct ec_response_hang_detect *)msg->data;
return 0;
}
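
DEFINE_RAW_FLEX() declares on-stack storage for a structure ending in a flexible array member plus a caller-chosen number of trailing bytes, replacing the hand-rolled packed wrapper. Roughly, as a simplified sketch rather than the real macro expansion:

	/* DEFINE_RAW_FLEX(struct cros_ec_command, msg, data,
	 *                 sizeof(union cros_ec_wdt_data)) is approximately: */
	union {
		u8 bytes[sizeof(struct cros_ec_command) +
			 sizeof(union cros_ec_wdt_data)];
		struct cros_ec_command obj;
	} msg_u = {};
	struct cros_ec_command *msg = &msg_u.obj;	/* msg->data has room
							 * for req/response */
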
diff --git a/drivers/watchdog/da9052_wdt.c b/drivers/watchdog/da9052_wdt.c
index 77039f2f0be5..afb7887c3a1e 100644
--- a/drivers/watchdog/da9052_wdt.c
+++ b/drivers/watchdog/da9052_wdt.c
@@ -30,6 +30,18 @@ struct da9052_wdt_data {
unsigned long jpast;
};
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout,
+ "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static int timeout;
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout,
+ "Watchdog timeout in seconds. (default = "
+		 __MODULE_STRING(DA9052_DEF_TIMEOUT) ")");
+
static const struct {
u8 reg_val;
int time; /* Seconds */
@@ -168,10 +180,13 @@ static int da9052_wdt_probe(struct platform_device *pdev)
da9052_wdt = &driver_data->wdt;
da9052_wdt->timeout = DA9052_DEF_TIMEOUT;
+ da9052_wdt->min_hw_heartbeat_ms = DA9052_TWDMIN;
da9052_wdt->info = &da9052_wdt_info;
da9052_wdt->ops = &da9052_wdt_ops;
da9052_wdt->parent = dev;
watchdog_set_drvdata(da9052_wdt, driver_data);
+ watchdog_init_timeout(da9052_wdt, timeout, dev);
+ watchdog_set_nowayout(da9052_wdt, nowayout);
if (da9052->fault_log & DA9052_FAULTLOG_TWDERROR)
da9052_wdt->bootstatus |= WDIOF_CARDRESET;
@@ -180,11 +195,15 @@ static int da9052_wdt_probe(struct platform_device *pdev)
if (da9052->fault_log & DA9052_FAULTLOG_VDDFAULT)
da9052_wdt->bootstatus |= WDIOF_POWERUNDER;
- ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
- DA9052_CONTROLD_TWDSCALE, 0);
- if (ret < 0) {
- dev_err(dev, "Failed to disable watchdog bits, %d\n", ret);
+ ret = da9052_reg_read(da9052, DA9052_CONTROL_D_REG);
+ if (ret < 0)
return ret;
+
+ /* Check if FW enabled the watchdog */
+ if (ret & DA9052_CONTROLD_TWDSCALE) {
+ /* Ensure proper initialization */
+ da9052_wdt_start(da9052_wdt);
+ set_bit(WDOG_HW_RUNNING, &da9052_wdt->status);
}
return devm_watchdog_register_device(dev, &driver_data->wdt);
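
The probe now follows the usual firmware-takeover pattern: if the PMIC already enabled the watchdog, the driver restarts it with known settings and sets WDOG_HW_RUNNING, which tells the watchdog core to keep pinging the device until userspace opens it. In outline, restating the hunk above rather than adding behaviour:

	if (ret & DA9052_CONTROLD_TWDSCALE) {	/* firmware left it armed */
		da9052_wdt_start(da9052_wdt);	/* reprogram a known timeout */
		set_bit(WDOG_HW_RUNNING, &da9052_wdt->status);
		/* core services the watchdog until userspace takes over */
	}

Setting min_hw_heartbeat_ms to DA9052_TWDMIN additionally makes the core rate-limit pings to the device's minimum service window.
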
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 7672582fa407..9ab769aa0244 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -58,7 +58,6 @@
#include <linux/platform_device.h> /* For platform_driver framework */
#include <linux/pci.h> /* For pci functions */
#include <linux/ioport.h> /* For io-port access */
-#include <linux/spinlock.h> /* For spin_lock/spin_unlock/... */
#include <linux/uaccess.h> /* For copy_to_user/put_user/... */
#include <linux/io.h> /* For inb/outb/... */
#include <linux/platform_data/itco_wdt.h>
@@ -102,8 +101,6 @@ struct iTCO_wdt_private {
* or memory-mapped PMC register bit 4 (TCO version 3).
*/
unsigned long __iomem *gcs_pmc;
- /* the lock for io operations */
- spinlock_t io_lock;
/* the PCI-device */
struct pci_dev *pci_dev;
/* whether or not the watchdog has been suspended */
@@ -286,13 +283,10 @@ static int iTCO_wdt_start(struct watchdog_device *wd_dev)
struct iTCO_wdt_private *p = watchdog_get_drvdata(wd_dev);
unsigned int val;
- spin_lock(&p->io_lock);
-
iTCO_vendor_pre_start(p->smi_res, wd_dev->timeout);
/* disable chipset's NO_REBOOT bit */
if (p->update_no_reboot_bit(p->no_reboot_priv, false)) {
- spin_unlock(&p->io_lock);
dev_err(wd_dev->parent, "failed to reset NO_REBOOT flag, reboot disabled by hardware/BIOS\n");
return -EIO;
}
@@ -309,7 +303,6 @@ static int iTCO_wdt_start(struct watchdog_device *wd_dev)
val &= 0xf7ff;
outw(val, TCO1_CNT(p));
val = inw(TCO1_CNT(p));
- spin_unlock(&p->io_lock);
if (val & 0x0800)
return -1;
@@ -321,8 +314,6 @@ static int iTCO_wdt_stop(struct watchdog_device *wd_dev)
struct iTCO_wdt_private *p = watchdog_get_drvdata(wd_dev);
unsigned int val;
- spin_lock(&p->io_lock);
-
iTCO_vendor_pre_stop(p->smi_res);
/* Bit 11: TCO Timer Halt -> 1 = The TCO timer is disabled */
@@ -334,8 +325,6 @@ static int iTCO_wdt_stop(struct watchdog_device *wd_dev)
/* Set the NO_REBOOT bit to prevent later reboots, just for sure */
p->update_no_reboot_bit(p->no_reboot_priv, true);
- spin_unlock(&p->io_lock);
-
if ((val & 0x0800) == 0)
return -1;
return 0;
@@ -345,8 +334,6 @@ static int iTCO_wdt_ping(struct watchdog_device *wd_dev)
{
struct iTCO_wdt_private *p = watchdog_get_drvdata(wd_dev);
- spin_lock(&p->io_lock);
-
/* Reload the timer by writing to the TCO Timer Counter register */
if (p->iTCO_version >= 2) {
outw(0x01, TCO_RLD(p));
@@ -358,7 +345,6 @@ static int iTCO_wdt_ping(struct watchdog_device *wd_dev)
outb(0x01, TCO_RLD(p));
}
- spin_unlock(&p->io_lock);
return 0;
}
@@ -385,24 +371,20 @@ static int iTCO_wdt_set_timeout(struct watchdog_device *wd_dev, unsigned int t)
/* Write new heartbeat to watchdog */
if (p->iTCO_version >= 2) {
- spin_lock(&p->io_lock);
val16 = inw(TCOv2_TMR(p));
val16 &= 0xfc00;
val16 |= tmrval;
outw(val16, TCOv2_TMR(p));
val16 = inw(TCOv2_TMR(p));
- spin_unlock(&p->io_lock);
if ((val16 & 0x3ff) != tmrval)
return -EINVAL;
} else if (p->iTCO_version == 1) {
- spin_lock(&p->io_lock);
val8 = inb(TCOv1_TMR(p));
val8 &= 0xc0;
val8 |= (tmrval & 0xff);
outb(val8, TCOv1_TMR(p));
val8 = inb(TCOv1_TMR(p));
- spin_unlock(&p->io_lock);
if ((val8 & 0x3f) != tmrval)
return -EINVAL;
@@ -421,19 +403,15 @@ static unsigned int iTCO_wdt_get_timeleft(struct watchdog_device *wd_dev)
/* read the TCO Timer */
if (p->iTCO_version >= 2) {
- spin_lock(&p->io_lock);
val16 = inw(TCO_RLD(p));
val16 &= 0x3ff;
- spin_unlock(&p->io_lock);
time_left = ticks_to_seconds(p, val16);
} else if (p->iTCO_version == 1) {
- spin_lock(&p->io_lock);
val8 = inb(TCO_RLD(p));
val8 &= 0x3f;
if (!(inw(TCO1_STS(p)) & 0x0008))
val8 += (inb(TCOv1_TMR(p)) & 0x3f);
- spin_unlock(&p->io_lock);
time_left = ticks_to_seconds(p, val8);
}
@@ -493,8 +471,6 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
if (!p)
return -ENOMEM;
- spin_lock_init(&p->io_lock);
-
p->tco_res = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_IO_TCO);
if (!p->tco_res)
return -ENODEV;
@@ -604,6 +580,7 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
iTCO_wdt_set_timeout(&p->wddev, WATCHDOG_TIMEOUT);
dev_info(dev, "timeout value out of range, using %d\n",
WATCHDOG_TIMEOUT);
+ heartbeat = WATCHDOG_TIMEOUT;
}
watchdog_stop_on_reboot(&p->wddev);
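
Removing io_lock is safe because the watchdog core already serializes every ops callback for a device behind its own per-device lock, so the driver-private spinlock only duplicated that protection. Schematically, as a simplified sketch of the core's behaviour rather than its exact code:

	/* watchdog core, per device: */
	mutex_lock(&wd_data->lock);
	err = wdd->ops->set_timeout(wdd, timeout);	/* ditto start/stop/ping */
	mutex_unlock(&wd_data->lock);
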
diff --git a/drivers/watchdog/intel_oc_wdt.c b/drivers/watchdog/intel_oc_wdt.c
new file mode 100644
index 000000000000..7c0551106981
--- /dev/null
+++ b/drivers/watchdog/intel_oc_wdt.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel OC Watchdog driver
+ *
+ * Copyright (C) 2025, Siemens
+ * Author: Diogo Ivo <diogo.ivo@siemens.com>
+ */
+
+#define DRV_NAME "intel_oc_wdt"
+
+#include <linux/acpi.h>
+#include <linux/bits.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+
+#define INTEL_OC_WDT_TOV GENMASK(9, 0)
+#define INTEL_OC_WDT_MIN_TOV 1
+#define INTEL_OC_WDT_MAX_TOV 1024
+#define INTEL_OC_WDT_DEF_TOV 60
+
+/*
+ * One-time writable lock bit. If set, it forbids
+ * modification of itself, _TOV and _EN until the
+ * next reboot.
+ */
+#define INTEL_OC_WDT_CTL_LCK BIT(12)
+
+#define INTEL_OC_WDT_EN BIT(14)
+#define INTEL_OC_WDT_NO_ICCSURV_STS BIT(24)
+#define INTEL_OC_WDT_ICCSURV_STS BIT(25)
+#define INTEL_OC_WDT_RLD BIT(31)
+
+#define INTEL_OC_WDT_STS_BITS (INTEL_OC_WDT_NO_ICCSURV_STS | \
+ INTEL_OC_WDT_ICCSURV_STS)
+
+#define INTEL_OC_WDT_CTRL_REG(wdt) ((wdt)->ctrl_res->start)
+
+struct intel_oc_wdt {
+ struct watchdog_device wdd;
+ struct resource *ctrl_res;
+ bool locked;
+};
+
+static unsigned int heartbeat;
+module_param(heartbeat, uint, 0);
+MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds. (default="
+	__MODULE_STRING(INTEL_OC_WDT_DEF_TOV) ")");
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static int intel_oc_wdt_start(struct watchdog_device *wdd)
+{
+ struct intel_oc_wdt *oc_wdt = watchdog_get_drvdata(wdd);
+
+ if (oc_wdt->locked)
+ return 0;
+
+ outl(inl(INTEL_OC_WDT_CTRL_REG(oc_wdt)) | INTEL_OC_WDT_EN,
+ INTEL_OC_WDT_CTRL_REG(oc_wdt));
+
+ return 0;
+}
+
+static int intel_oc_wdt_stop(struct watchdog_device *wdd)
+{
+ struct intel_oc_wdt *oc_wdt = watchdog_get_drvdata(wdd);
+
+ outl(inl(INTEL_OC_WDT_CTRL_REG(oc_wdt)) & ~INTEL_OC_WDT_EN,
+ INTEL_OC_WDT_CTRL_REG(oc_wdt));
+
+ return 0;
+}
+
+static int intel_oc_wdt_ping(struct watchdog_device *wdd)
+{
+ struct intel_oc_wdt *oc_wdt = watchdog_get_drvdata(wdd);
+
+ outl(inl(INTEL_OC_WDT_CTRL_REG(oc_wdt)) | INTEL_OC_WDT_RLD,
+ INTEL_OC_WDT_CTRL_REG(oc_wdt));
+
+ return 0;
+}
+
+static int intel_oc_wdt_set_timeout(struct watchdog_device *wdd,
+ unsigned int t)
+{
+ struct intel_oc_wdt *oc_wdt = watchdog_get_drvdata(wdd);
+
+ outl((inl(INTEL_OC_WDT_CTRL_REG(oc_wdt)) & ~INTEL_OC_WDT_TOV) | (t - 1),
+ INTEL_OC_WDT_CTRL_REG(oc_wdt));
+
+ wdd->timeout = t;
+
+ return 0;
+}
+
+static struct watchdog_info intel_oc_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
+ .identity = DRV_NAME,
+};
+
+static const struct watchdog_ops intel_oc_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = intel_oc_wdt_start,
+ .stop = intel_oc_wdt_stop,
+ .ping = intel_oc_wdt_ping,
+ .set_timeout = intel_oc_wdt_set_timeout,
+};
+
+static int intel_oc_wdt_setup(struct intel_oc_wdt *oc_wdt)
+{
+ struct watchdog_info *info;
+ unsigned long val;
+
+ val = inl(INTEL_OC_WDT_CTRL_REG(oc_wdt));
+
+ if (val & INTEL_OC_WDT_STS_BITS)
+ oc_wdt->wdd.bootstatus |= WDIOF_CARDRESET;
+
+ oc_wdt->locked = !!(val & INTEL_OC_WDT_CTL_LCK);
+
+ if (val & INTEL_OC_WDT_EN) {
+ /*
+ * No need to issue a ping here to "commit" the new timeout
+ * value to hardware as the watchdog core schedules one
+ * immediately when registering the watchdog.
+ */
+ set_bit(WDOG_HW_RUNNING, &oc_wdt->wdd.status);
+
+ if (oc_wdt->locked) {
+ info = (struct watchdog_info *)&intel_oc_wdt_info;
+ /*
+ * Set nowayout unconditionally as we cannot stop
+ * the watchdog.
+ */
+ nowayout = true;
+ /*
+ * If we are locked, read the current timeout value
+ * and inform the core we can't change it.
+ */
+ oc_wdt->wdd.timeout = (val & INTEL_OC_WDT_TOV) + 1;
+ info->options &= ~WDIOF_SETTIMEOUT;
+
+ dev_info(oc_wdt->wdd.parent,
+ "Register access locked, heartbeat fixed at: %u s\n",
+ oc_wdt->wdd.timeout);
+ }
+ } else if (oc_wdt->locked) {
+ /*
+ * If the watchdog is disabled and locked, there is
+ * nothing we can do with it, so just fail probing.
+ */
+ return -EACCES;
+ }
+
+ val &= ~INTEL_OC_WDT_TOV;
+ outl(val | (oc_wdt->wdd.timeout - 1), INTEL_OC_WDT_CTRL_REG(oc_wdt));
+
+ return 0;
+}
+
+static int intel_oc_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct intel_oc_wdt *oc_wdt;
+ struct watchdog_device *wdd;
+ int ret;
+
+ oc_wdt = devm_kzalloc(&pdev->dev, sizeof(*oc_wdt), GFP_KERNEL);
+ if (!oc_wdt)
+ return -ENOMEM;
+
+ oc_wdt->ctrl_res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!oc_wdt->ctrl_res) {
+ dev_err(&pdev->dev, "missing I/O resource\n");
+ return -ENODEV;
+ }
+
+ if (!devm_request_region(&pdev->dev, oc_wdt->ctrl_res->start,
+ resource_size(oc_wdt->ctrl_res), pdev->name)) {
+ dev_err(dev, "resource %pR already in use, device disabled\n",
+ oc_wdt->ctrl_res);
+ return -EBUSY;
+ }
+
+ wdd = &oc_wdt->wdd;
+ wdd->min_timeout = INTEL_OC_WDT_MIN_TOV;
+ wdd->max_timeout = INTEL_OC_WDT_MAX_TOV;
+ wdd->timeout = INTEL_OC_WDT_DEF_TOV;
+ wdd->info = &intel_oc_wdt_info;
+ wdd->ops = &intel_oc_wdt_ops;
+ wdd->parent = dev;
+
+ watchdog_init_timeout(wdd, heartbeat, dev);
+
+ ret = intel_oc_wdt_setup(oc_wdt);
+ if (ret)
+ return ret;
+
+ watchdog_set_drvdata(wdd, oc_wdt);
+ watchdog_set_nowayout(wdd, nowayout);
+ watchdog_stop_on_reboot(wdd);
+ watchdog_stop_on_unregister(wdd);
+
+ return devm_watchdog_register_device(dev, wdd);
+}
+
+static const struct acpi_device_id intel_oc_wdt_match[] = {
+ { "INT3F0D" },
+ { "INTC1099" },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, intel_oc_wdt_match);
+
+static struct platform_driver intel_oc_wdt_platform_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .acpi_match_table = intel_oc_wdt_match,
+ },
+ .probe = intel_oc_wdt_probe,
+};
+
+module_platform_driver(intel_oc_wdt_platform_driver);
+
+MODULE_AUTHOR("Diogo Ivo <diogo.ivo@siemens.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel OC Watchdog driver");
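
The driver keeps all state in the single control register: the low ten bits hold the timeout minus one, so decoding adds one back and encoding subtracts it, as in this small worked example built from the masks defined above:

	u32 val = inl(INTEL_OC_WDT_CTRL_REG(oc_wdt));

	/* decode: a TOV field of 59 means a 60 s timeout */
	unsigned int timeout = (val & INTEL_OC_WDT_TOV) + 1;

	/* encode: program 120 s without disturbing the other bits */
	val = (val & ~INTEL_OC_WDT_TOV) | (120 - 1);
	outl(val, INTEL_OC_WDT_CTRL_REG(oc_wdt));
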
diff --git a/drivers/watchdog/lenovo_se30_wdt.c b/drivers/watchdog/lenovo_se30_wdt.c
index 024b842499b3..1c73bb7eeeee 100644
--- a/drivers/watchdog/lenovo_se30_wdt.c
+++ b/drivers/watchdog/lenovo_se30_wdt.c
@@ -271,6 +271,8 @@ static int lenovo_se30_wdt_probe(struct platform_device *pdev)
return -EBUSY;
priv->shm_base_addr = devm_ioremap(dev, base_phys, SHM_WIN_SIZE);
+ if (!priv->shm_base_addr)
+ return -ENOMEM;
priv->wdt_cfg.mod = WDT_MODULE;
priv->wdt_cfg.idx = WDT_CFG_INDEX;
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
index 132699e2f247..b636650b714b 100644
--- a/drivers/watchdog/pcwd_usb.c
+++ b/drivers/watchdog/pcwd_usb.c
@@ -579,7 +579,7 @@ static struct notifier_block usb_pcwd_notifier = {
.notifier_call = usb_pcwd_notify_sys,
};
-/**
+/*
* usb_pcwd_delete
*/
static inline void usb_pcwd_delete(struct usb_pcwd_private *usb_pcwd)
@@ -590,7 +590,7 @@ static inline void usb_pcwd_delete(struct usb_pcwd_private *usb_pcwd)
kfree(usb_pcwd);
}
-/**
+/*
* usb_pcwd_probe
*
* Called by the usb core when a new device is connected that it thinks
@@ -758,7 +758,7 @@ error:
}
-/**
+/*
* usb_pcwd_disconnect
*
* Called by the usb core when the device is removed from the system.
diff --git a/drivers/watchdog/pretimeout_noop.c b/drivers/watchdog/pretimeout_noop.c
index 4799551dd784..74ec02b9ffca 100644
--- a/drivers/watchdog/pretimeout_noop.c
+++ b/drivers/watchdog/pretimeout_noop.c
@@ -11,7 +11,7 @@
/**
* pretimeout_noop - No operation on watchdog pretimeout event
- * @wdd - watchdog_device
+ * @wdd: watchdog_device
*
* This function prints a message about pretimeout to kernel log.
*/
diff --git a/drivers/watchdog/pretimeout_panic.c b/drivers/watchdog/pretimeout_panic.c
index 2cc3c41d2be5..8c3ac674dc45 100644
--- a/drivers/watchdog/pretimeout_panic.c
+++ b/drivers/watchdog/pretimeout_panic.c
@@ -11,7 +11,7 @@
/**
* pretimeout_panic - Panic on watchdog pretimeout event
- * @wdd - watchdog_device
+ * @wdd: watchdog_device
*
* Panic, watchdog has not been fed till pretimeout event.
*/
diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
index 006f9c61aa64..dfaac5995c84 100644
--- a/drivers/watchdog/qcom-wdt.c
+++ b/drivers/watchdog/qcom-wdt.c
@@ -181,6 +181,12 @@ static const struct qcom_wdt_match_data match_data_apcs_tmr = {
.max_tick_count = 0x10000000U,
};
+static const struct qcom_wdt_match_data match_data_ipq5424 = {
+ .offset = reg_offset_data_kpss,
+ .pretimeout = true,
+ .max_tick_count = 0xFFFFFU,
+};
+
static const struct qcom_wdt_match_data match_data_kpss = {
.offset = reg_offset_data_kpss,
.pretimeout = true,
@@ -322,6 +328,7 @@ static const struct dev_pm_ops qcom_wdt_pm_ops = {
};
static const struct of_device_id qcom_wdt_of_table[] = {
+ { .compatible = "qcom,apss-wdt-ipq5424", .data = &match_data_ipq5424 },
{ .compatible = "qcom,kpss-timer", .data = &match_data_apcs_tmr },
{ .compatible = "qcom,scss-timer", .data = &match_data_apcs_tmr },
{ .compatible = "qcom,kpss-wdt", .data = &match_data_kpss },
diff --git a/drivers/watchdog/s32g_wdt.c b/drivers/watchdog/s32g_wdt.c
new file mode 100644
index 000000000000..ad55063060af
--- /dev/null
+++ b/drivers/watchdog/s32g_wdt.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Watchdog driver for S32G SoC
+ *
+ * Copyright 2017-2019, 2021-2025 NXP.
+ *
+ */
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+
+#define DRIVER_NAME "s32g-swt"
+
+#define S32G_SWT_CR(__base) ((__base) + 0x00) /* Control Register offset */
+#define S32G_SWT_CR_SM (BIT(9) | BIT(10)) /* -> Service Mode */
+#define S32G_SWT_CR_STP BIT(2) /* -> Stop Mode Control */
+#define S32G_SWT_CR_FRZ BIT(1) /* -> Debug Mode Control */
+#define S32G_SWT_CR_WEN BIT(0) /* -> Watchdog Enable */
+
+#define S32G_SWT_TO(__base) ((__base) + 0x08) /* Timeout Register offset */
+
+#define S32G_SWT_SR(__base) ((__base) + 0x10) /* Service Register offset */
+#define S32G_WDT_SEQ1 0xA602 /* -> service sequence number 1 */
+#define S32G_WDT_SEQ2 0xB480 /* -> service sequence number 2 */
+
+#define S32G_SWT_CO(__base) ((__base) + 0x14) /* Counter output register */
+
+#define S32G_WDT_DEFAULT_TIMEOUT 30
+
+struct s32g_wdt_device {
+ int rate;
+ void __iomem *base;
+ struct watchdog_device wdog;
+};
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static unsigned int timeout_param = S32G_WDT_DEFAULT_TIMEOUT;
+module_param(timeout_param, uint, 0);
+MODULE_PARM_DESC(timeout_param, "Watchdog timeout in seconds (default="
+ __MODULE_STRING(S32G_WDT_DEFAULT_TIMEOUT) ")");
+
+static bool early_enable;
+module_param(early_enable, bool, 0);
+MODULE_PARM_DESC(early_enable,
+ "Watchdog is started on module insertion (default=false)");
+
+static const struct watchdog_info s32g_wdt_info = {
+ .identity = "s32g watchdog",
+ .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE,
+};
+
+static struct s32g_wdt_device *wdd_to_s32g_wdt(struct watchdog_device *wdd)
+{
+ return container_of(wdd, struct s32g_wdt_device, wdog);
+}
+
+static unsigned int wdog_sec_to_count(struct s32g_wdt_device *wdev, unsigned int timeout)
+{
+ return wdev->rate * timeout;
+}
+
+static int s32g_wdt_ping(struct watchdog_device *wdog)
+{
+ struct s32g_wdt_device *wdev = wdd_to_s32g_wdt(wdog);
+
+ writel(S32G_WDT_SEQ1, S32G_SWT_SR(wdev->base));
+ writel(S32G_WDT_SEQ2, S32G_SWT_SR(wdev->base));
+
+ return 0;
+}
+
+static int s32g_wdt_start(struct watchdog_device *wdog)
+{
+ struct s32g_wdt_device *wdev = wdd_to_s32g_wdt(wdog);
+ unsigned long val;
+
+ val = readl(S32G_SWT_CR(wdev->base));
+
+ val |= S32G_SWT_CR_WEN;
+
+ writel(val, S32G_SWT_CR(wdev->base));
+
+ return 0;
+}
+
+static int s32g_wdt_stop(struct watchdog_device *wdog)
+{
+ struct s32g_wdt_device *wdev = wdd_to_s32g_wdt(wdog);
+ unsigned long val;
+
+ val = readl(S32G_SWT_CR(wdev->base));
+
+ val &= ~S32G_SWT_CR_WEN;
+
+ writel(val, S32G_SWT_CR(wdev->base));
+
+ return 0;
+}
+
+static int s32g_wdt_set_timeout(struct watchdog_device *wdog, unsigned int timeout)
+{
+ struct s32g_wdt_device *wdev = wdd_to_s32g_wdt(wdog);
+
+ writel(wdog_sec_to_count(wdev, timeout), S32G_SWT_TO(wdev->base));
+
+ wdog->timeout = timeout;
+
+ /*
+ * According to the documentation, the timeout counter is
+ * loaded when the watchdog is serviced (pinged) or when the
+ * counter is enabled. If the watchdog is already running, it
+ * must either be stopped and started again to update the
+ * timeout register, or a ping can be sent to refresh the
+ * counter. Here we choose to send a ping, which is harmless
+ * if the watchdog is stopped.
+ */
+ return s32g_wdt_ping(wdog);
+}
+
+static unsigned int s32g_wdt_get_timeleft(struct watchdog_device *wdog)
+{
+ struct s32g_wdt_device *wdev = wdd_to_s32g_wdt(wdog);
+ unsigned long counter;
+ bool is_running;
+
+ /*
+ * The counter output can be read only while the SWT is
+ * disabled. Given the latency between the internal counter
+ * and the counter output update, there can be a very small
+ * difference. That is acceptable, however, since the output
+ * has only one-second resolution.
+ */
+ is_running = watchdog_hw_running(wdog);
+
+ if (is_running)
+ s32g_wdt_stop(wdog);
+
+ counter = readl(S32G_SWT_CO(wdev->base));
+
+ if (is_running)
+ s32g_wdt_start(wdog);
+
+ return counter / wdev->rate;
+}
+
+static const struct watchdog_ops s32g_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = s32g_wdt_start,
+ .stop = s32g_wdt_stop,
+ .ping = s32g_wdt_ping,
+ .set_timeout = s32g_wdt_set_timeout,
+ .get_timeleft = s32g_wdt_get_timeleft,
+};
+
+static void s32g_wdt_init(struct s32g_wdt_device *wdev)
+{
+ unsigned long val;
+
+ /* Set the watchdog's Time-Out value */
+ val = wdog_sec_to_count(wdev, wdev->wdog.timeout);
+
+ writel(val, S32G_SWT_TO(wdev->base));
+
+ /*
+ * Get the control register content. We are at init time; the
+ * watchdog should not be running yet.
+ */
+ val = readl(S32G_SWT_CR(wdev->base));
+
+ /*
+ * We want to allow the watchdog timer to be stopped when
+ * device enters debug mode.
+ */
+ val |= S32G_SWT_CR_FRZ;
+
+ /*
+ * However, when the CPU is in WFI or suspend mode, the
+ * watchdog must continue. The documentation refers to this
+ * as the stop mode.
+ */
+ val &= ~S32G_SWT_CR_STP;
+
+ /*
+ * Use the Fixed Service Sequence to ping the watchdog; this
+ * is the 0x00 configuration value for the service mode. It
+ * should already be set, as it is the default value, but we
+ * clear it just in case.
+ */
+ val &= ~S32G_SWT_CR_SM;
+
+ writel(val, S32G_SWT_CR(wdev->base));
+
+ /*
+ * When the 'early_enable' option is set, we start the
+ * watchdog from the kernel.
+ */
+ if (early_enable) {
+ s32g_wdt_start(&wdev->wdog);
+ set_bit(WDOG_HW_RUNNING, &wdev->wdog.status);
+ }
+}
+
+static int s32g_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct clk *clk;
+ struct s32g_wdt_device *wdev;
+ struct watchdog_device *wdog;
+ int ret;
+
+ wdev = devm_kzalloc(dev, sizeof(*wdev), GFP_KERNEL);
+ if (!wdev)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ wdev->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(wdev->base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(wdev->base), "Can not get resource\n");
+
+ clk = devm_clk_get_enabled(dev, "counter");
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "Can't get Watchdog clock\n");
+
+ wdev->rate = clk_get_rate(clk);
+ if (!wdev->rate) {
+ dev_err(dev, "Input clock rate is not valid\n");
+ return -EINVAL;
+ }
+
+ wdog = &wdev->wdog;
+ wdog->info = &s32g_wdt_info;
+ wdog->ops = &s32g_wdt_ops;
+
+ /*
+ * The code converts the timeout into a counter value. If
+ * the value is less than 0x100, it is clamped by the SWT
+ * module, so it is safe to specify a zero value as the
+ * minimum timeout.
+ */
+ wdog->min_timeout = 0;
+
+ /*
+ * The counter register is 32 bits long, so the maximum
+ * counter value is UINT_MAX and the timeout in seconds is
+ * that value divided by the rate.
+ *
+ * For instance, a rate of 51 MHz leads to a maximum timeout
+ * of 84 seconds.
+ */
+ wdog->max_timeout = UINT_MAX / wdev->rate;
+
+ /*
+ * The module param and the DT 'timeout-sec' property will
+ * override the default value if they are specified.
+ */
+ ret = watchdog_init_timeout(wdog, timeout_param, dev);
+ if (ret)
+ return ret;
+
+ /*
+ * As soon as the watchdog is started, there is no way to stop
+ * it if the 'nowayout' option is set at boot time.
+ */
+ watchdog_set_nowayout(wdog, nowayout);
+
+ /*
+ * The devm_ version of the watchdog_register_device()
+ * function will call watchdog_unregister_device() when the
+ * device is removed.
+ */
+ watchdog_stop_on_unregister(wdog);
+
+ s32g_wdt_init(wdev);
+
+ ret = devm_watchdog_register_device(dev, wdog);
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot register watchdog device\n");
+
+ dev_info(dev, "S32G Watchdog Timer Registered, timeout=%ds, nowayout=%d, early_enable=%d\n",
+ wdog->timeout, nowayout, early_enable);
+
+ return 0;
+}
+
+static const struct of_device_id s32g_wdt_dt_ids[] = {
+ { .compatible = "nxp,s32g2-swt" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, s32g_wdt_dt_ids);
+
+static struct platform_driver s32g_wdt_driver = {
+ .probe = s32g_wdt_probe,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = s32g_wdt_dt_ids,
+ },
+};
+
+module_platform_driver(s32g_wdt_driver);
+
+MODULE_AUTHOR("Daniel Lezcano <daniel.lezcano@linaro.org>");
+MODULE_DESCRIPTION("Watchdog driver for S32G SoC");
+MODULE_LICENSE("GPL");
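
The maximum timeout advertised to the core follows directly from the 32-bit counter, max_timeout = UINT_MAX / rate; checking the figure quoted in the driver comment:

	unsigned int rate = 51000000;		/* 51 MHz, from the comment */
	unsigned int max_timeout = UINT_MAX / rate;
						/* 4294967295 / 51000000 = 84 s */
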
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index bdd81d8074b2..40901bdac426 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -82,6 +82,10 @@
#define GS_CLUSTER2_NONCPU_INT_EN 0x1644
#define GS_RST_STAT_REG_OFFSET 0x3B44
+#define EXYNOS990_CLUSTER2_NONCPU_OUT 0x1620
+#define EXYNOS990_CLUSTER2_NONCPU_INT_EN 0x1644
+#define EXYNOS990_CLUSTER2_WDTRESET_BIT 23
+
/**
* DOC: Quirk flags for different Samsung watchdog IP-cores
*
@@ -259,6 +263,32 @@ static const struct s3c2410_wdt_variant drv_data_exynos850_cl1 = {
QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN,
};
+static const struct s3c2410_wdt_variant drv_data_exynos990_cl0 = {
+ .mask_reset_reg = GS_CLUSTER0_NONCPU_INT_EN,
+ .mask_bit = 2,
+ .mask_reset_inv = true,
+ .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
+ .rst_stat_bit = EXYNOS850_CLUSTER0_WDTRESET_BIT,
+ .cnt_en_reg = EXYNOSAUTOV920_CLUSTER0_NONCPU_OUT,
+ .cnt_en_bit = 7,
+ .quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET |
+ QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN |
+ QUIRK_HAS_DBGACK_BIT,
+};
+
+static const struct s3c2410_wdt_variant drv_data_exynos990_cl2 = {
+ .mask_reset_reg = EXYNOS990_CLUSTER2_NONCPU_INT_EN,
+ .mask_bit = 2,
+ .mask_reset_inv = true,
+ .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
+ .rst_stat_bit = EXYNOS990_CLUSTER2_WDTRESET_BIT,
+ .cnt_en_reg = EXYNOS990_CLUSTER2_NONCPU_OUT,
+ .cnt_en_bit = 7,
+ .quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET |
+ QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN |
+ QUIRK_HAS_DBGACK_BIT,
+};
+
static const struct s3c2410_wdt_variant drv_data_exynosautov9_cl0 = {
.mask_reset_reg = EXYNOS850_CLUSTER0_NONCPU_INT_EN,
.mask_bit = 2,
@@ -350,6 +380,8 @@ static const struct of_device_id s3c2410_wdt_match[] = {
.data = &drv_data_exynos7 },
{ .compatible = "samsung,exynos850-wdt",
.data = &drv_data_exynos850_cl0 },
+ { .compatible = "samsung,exynos990-wdt",
+ .data = &drv_data_exynos990_cl0 },
{ .compatible = "samsung,exynosautov9-wdt",
.data = &drv_data_exynosautov9_cl0 },
{ .compatible = "samsung,exynosautov920-wdt",
@@ -678,7 +710,8 @@ s3c2410_get_wdt_drv_data(struct platform_device *pdev, struct s3c2410_wdt *wdt)
if (variant == &drv_data_exynos850_cl0 ||
variant == &drv_data_exynosautov9_cl0 ||
variant == &drv_data_gs101_cl0 ||
- variant == &drv_data_exynosautov920_cl0) {
+ variant == &drv_data_exynosautov920_cl0 ||
+ variant == &drv_data_exynos990_cl0) {
u32 index;
int err;
@@ -700,6 +733,10 @@ s3c2410_get_wdt_drv_data(struct platform_device *pdev, struct s3c2410_wdt *wdt)
else if (variant == &drv_data_exynosautov920_cl0)
variant = &drv_data_exynosautov920_cl1;
break;
+ case 2:
+ if (variant == &drv_data_exynos990_cl0)
+ variant = &drv_data_exynos990_cl2;
+ break;
default:
return dev_err_probe(dev, -EINVAL, "wrong cluster index: %u\n", index);
}
diff --git a/drivers/watchdog/stm32_iwdg.c b/drivers/watchdog/stm32_iwdg.c
index 8ad06b54c5ad..b356a272ff9a 100644
--- a/drivers/watchdog/stm32_iwdg.c
+++ b/drivers/watchdog/stm32_iwdg.c
@@ -291,7 +291,7 @@ static int stm32_iwdg_irq_init(struct platform_device *pdev,
return 0;
if (of_property_read_bool(np, "wakeup-source")) {
- ret = device_init_wakeup(dev, true);
+ ret = devm_device_init_wakeup(dev);
if (ret)
return ret;
diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c
index dc5f29560e9b..3918a600f2a0 100644
--- a/drivers/watchdog/wdt_pci.c
+++ b/drivers/watchdog/wdt_pci.c
@@ -264,7 +264,7 @@ static int wdtpci_get_status(int *status)
return 0;
}
-/**
+/*
* wdtpci_get_temperature:
*
* Reports the temperature in degrees Fahrenheit. The API is in