Diffstat (limited to 'drivers')
-rw-r--r-- drivers/accel/qaic/Kconfig | 1
-rw-r--r-- drivers/acpi/irq.c | 16
-rw-r--r-- drivers/ata/libata-core.c | 6
-rw-r--r-- drivers/ata/libata-eh.c | 39
-rw-r--r-- drivers/ata/libata-sata.c | 12
-rw-r--r-- drivers/ata/libata-scsi.c | 31
-rw-r--r-- drivers/ata/libata.h | 3
-rw-r--r-- drivers/ata/sata_sx4.c | 30
-rw-r--r-- drivers/base/property.c | 12
-rw-r--r-- drivers/block/mtip32xx/mtip32xx.c | 7
-rw-r--r-- drivers/block/zram/backend_deflate.c | 12
-rw-r--r-- drivers/block/zram/backend_lz4.c | 2
-rw-r--r-- drivers/block/zram/backend_lz4hc.c | 2
-rw-r--r-- drivers/block/zram/backend_zstd.c | 2
-rw-r--r-- drivers/block/zram/zcomp.h | 9
-rw-r--r-- drivers/block/zram/zram_drv.c | 21
-rw-r--r-- drivers/clocksource/timer-stm32-lp.c | 61
-rw-r--r-- drivers/cxl/Kconfig | 71
-rw-r--r-- drivers/cxl/acpi.c | 24
-rw-r--r-- drivers/cxl/core/Makefile | 1
-rw-r--r-- drivers/cxl/core/cdat.c | 2
-rw-r--r-- drivers/cxl/core/core.h | 4
-rw-r--r-- drivers/cxl/core/edac.c | 2102
-rw-r--r-- drivers/cxl/core/features.c | 43
-rw-r--r-- drivers/cxl/core/hdm.c | 11
-rw-r--r-- drivers/cxl/core/mbox.c | 11
-rw-r--r-- drivers/cxl/core/memdev.c | 5
-rw-r--r-- drivers/cxl/core/pci.c | 48
-rw-r--r-- drivers/cxl/core/port.c | 23
-rw-r--r-- drivers/cxl/core/region.c | 189
-rw-r--r-- drivers/cxl/cxl.h | 23
-rw-r--r-- drivers/cxl/cxlmem.h | 30
-rw-r--r-- drivers/cxl/mem.c | 4
-rw-r--r-- drivers/cxl/port.c | 15
-rwxr-xr-x drivers/edac/mem_repair.c | 9
-rw-r--r-- drivers/firewire/Kconfig | 2
-rw-r--r-- drivers/firmware/smccc/kvm_guest.c | 10
-rw-r--r-- drivers/firmware/smccc/smccc.c | 17
-rw-r--r-- drivers/gpu/drm/Kconfig | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/Kconfig | 3
-rw-r--r-- drivers/gpu/drm/ast/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/gma500/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/hisilicon/hibmc/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_reg_defs.h | 108
-rw-r--r-- drivers/gpu/drm/loongson/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/mgag200/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/nouveau/Kconfig | 3
-rw-r--r-- drivers/gpu/drm/qxl/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/radeon/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/tiny/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/xe/Kconfig | 2
-rw-r--r-- drivers/hid/Kconfig | 2
-rw-r--r-- drivers/hid/hid-appletb-kbd.c | 9
-rw-r--r-- drivers/hid/hid-core.c | 9
-rw-r--r-- drivers/hid/hid-corsair-void.c | 4
-rw-r--r-- drivers/hid/hid-cp2112.c | 66
-rw-r--r-- drivers/hid/hid-hyperv.c | 4
-rw-r--r-- drivers/hid/hid-ids.h | 1
-rw-r--r-- drivers/hid/hid-kysona.c | 46
-rw-r--r-- drivers/hid/hid-lg4ff.c | 6
-rw-r--r-- drivers/hid/hid-magicmouse.c | 74
-rw-r--r-- drivers/hid/hid-mcp2200.c | 23
-rw-r--r-- drivers/hid/hid-mcp2221.c | 10
-rw-r--r-- drivers/hid/hid-multitouch.c | 12
-rw-r--r-- drivers/hid/hid-quirks.c | 5
-rw-r--r-- drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c | 7
-rw-r--r-- drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c | 4
-rw-r--r-- drivers/hid/usbhid/hid-core.c | 25
-rw-r--r-- drivers/hv/Kconfig | 7
-rw-r--r-- drivers/hv/connection.c | 23
-rw-r--r-- drivers/hv/hv_common.c | 76
-rw-r--r-- drivers/hv/vmbus_drv.c | 95
-rw-r--r-- drivers/hwmon/Kconfig | 29
-rw-r--r-- drivers/hwmon/Makefile | 3
-rw-r--r-- drivers/hwmon/aht10.c | 16
-rw-r--r-- drivers/hwmon/amc6821.c | 50
-rw-r--r-- drivers/hwmon/asus-ec-sensors.c | 53
-rw-r--r-- drivers/hwmon/dell-smm-hwmon.c | 7
-rw-r--r-- drivers/hwmon/gpio-fan.c | 103
-rw-r--r-- drivers/hwmon/ina238.c | 214
-rw-r--r-- drivers/hwmon/ina2xx.c | 8
-rw-r--r-- drivers/hwmon/isl28022.c | 8
-rw-r--r-- drivers/hwmon/k10temp.c | 7
-rw-r--r-- drivers/hwmon/kbatt.c | 147
-rw-r--r-- drivers/hwmon/kfan.c | 246
-rw-r--r-- drivers/hwmon/lm75.c | 2
-rw-r--r-- drivers/hwmon/lm90.c | 2
-rw-r--r-- drivers/hwmon/ltc2992.c | 30
-rw-r--r-- drivers/hwmon/max6639.c | 16
-rw-r--r-- drivers/hwmon/max77705-hwmon.c | 221
-rw-r--r-- drivers/hwmon/nct7363.c | 2
-rw-r--r-- drivers/hwmon/pmbus/Kconfig | 18
-rw-r--r-- drivers/hwmon/pmbus/Makefile | 1
-rw-r--r-- drivers/hwmon/pmbus/lm25066.c | 2
-rw-r--r-- drivers/hwmon/pmbus/lt3074.c | 122
-rw-r--r-- drivers/hwmon/pmbus/max34440.c | 119
-rw-r--r-- drivers/hwmon/pmbus/mpq7932.c | 4
-rw-r--r-- drivers/hwmon/pmbus/mpq8785.c | 91
-rw-r--r-- drivers/hwmon/pmbus/pmbus.h | 19
-rw-r--r-- drivers/hwmon/pmbus/pmbus_core.c | 69
-rw-r--r-- drivers/hwmon/pmbus/tda38640.c | 2
-rw-r--r-- drivers/hwmon/pmbus/tps25990.c | 2
-rw-r--r-- drivers/hwmon/pmbus/ucd9000.c | 16
-rw-r--r-- drivers/hwmon/pwm-fan.c | 4
-rw-r--r-- drivers/hwmon/qnap-mcu-hwmon.c | 1
-rw-r--r-- drivers/hwmon/spd5118.c | 357
-rw-r--r-- drivers/hwmon/tmp102.c | 5
-rw-r--r-- drivers/hwmon/xgene-hwmon.c | 39
-rw-r--r-- drivers/input/joystick/xpad.c | 53
-rw-r--r-- drivers/input/keyboard/atkbd.c | 2
-rw-r--r-- drivers/input/keyboard/gpio_keys.c | 6
-rw-r--r-- drivers/input/keyboard/matrix_keypad.c | 30
-rw-r--r-- drivers/input/keyboard/snvs_pwrkey.c | 25
-rw-r--r-- drivers/input/misc/ims-pcu.c | 6
-rw-r--r-- drivers/iommu/amd/init.c | 3
-rw-r--r-- drivers/leds/.kunitconfig | 4
-rw-r--r-- drivers/leds/Kconfig | 11
-rw-r--r-- drivers/leds/Makefile | 1
-rw-r--r-- drivers/leds/blink/leds-lgm-sso.c | 6
-rw-r--r-- drivers/leds/flash/Kconfig | 11
-rw-r--r-- drivers/leds/flash/Makefile | 1
-rw-r--r-- drivers/leds/flash/leds-tps6131x.c | 815
-rw-r--r-- drivers/leds/led-class-flash.c | 15
-rw-r--r-- drivers/leds/led-class-multicolor.c | 3
-rw-r--r-- drivers/leds/led-core.c | 43
-rw-r--r-- drivers/leds/led-test.c | 132
-rw-r--r-- drivers/leds/led-triggers.c | 13
-rw-r--r-- drivers/leds/leds-cros_ec.c | 21
-rw-r--r-- drivers/leds/leds-lp8860.c | 214
-rw-r--r-- drivers/leds/leds-pca9532.c | 11
-rw-r--r-- drivers/leds/leds-pca955x.c | 28
-rw-r--r-- drivers/leds/leds-pca995x.c | 2
-rw-r--r-- drivers/leds/leds-tca6507.c | 11
-rw-r--r-- drivers/leds/leds-turris-omnia.c | 4
-rw-r--r-- drivers/leds/rgb/leds-mt6370-rgb.c | 16
-rw-r--r-- drivers/leds/rgb/leds-ncp5623.c | 5
-rw-r--r-- drivers/leds/rgb/leds-pwm-multicolor.c | 7
-rw-r--r-- drivers/leds/trigger/ledtrig-backlight.c | 48
-rw-r--r-- drivers/mailbox/Kconfig | 14
-rw-r--r-- drivers/mailbox/Makefile | 2
-rw-r--r-- drivers/mailbox/cv1800-mailbox.c | 220
-rw-r--r-- drivers/mailbox/imx-mailbox.c | 21
-rw-r--r-- drivers/mailbox/mailbox.c | 199
-rw-r--r-- drivers/mailbox/mtk-cmdq-mailbox.c | 51
-rw-r--r-- drivers/mailbox/qcom-apcs-ipc-mailbox.c | 16
-rw-r--r-- drivers/md/dm-bufio.c | 189
-rw-r--r-- drivers/md/dm-core.h | 4
-rw-r--r-- drivers/md/dm-delay.c | 17
-rw-r--r-- drivers/md/dm-dust.c | 4
-rw-r--r-- drivers/md/dm-ebs-target.c | 3
-rw-r--r-- drivers/md/dm-flakey.c | 118
-rw-r--r-- drivers/md/dm-ioctl.c | 1
-rw-r--r-- drivers/md/dm-linear.c | 4
-rw-r--r-- drivers/md/dm-log-writes.c | 4
-rw-r--r-- drivers/md/dm-mpath.c | 243
-rw-r--r-- drivers/md/dm-raid1.c | 5
-rw-r--r-- drivers/md/dm-rq.c | 4
-rw-r--r-- drivers/md/dm-stripe.c | 5
-rw-r--r-- drivers/md/dm-switch.c | 4
-rw-r--r-- drivers/md/dm-table.c | 263
-rw-r--r-- drivers/md/dm-vdo/indexer/volume.c | 24
-rw-r--r-- drivers/md/dm-verity-fec.c | 4
-rw-r--r-- drivers/md/dm-verity-target.c | 15
-rw-r--r-- drivers/md/dm-verity-verify-sig.c | 17
-rw-r--r-- drivers/md/dm-zone.c | 98
-rw-r--r-- drivers/md/dm-zoned-target.c | 3
-rw-r--r-- drivers/md/dm.c | 73
-rw-r--r-- drivers/md/dm.h | 6
-rw-r--r-- drivers/mfd/88pm886.c | 14
-rw-r--r-- drivers/mfd/Kconfig | 35
-rw-r--r-- drivers/mfd/Makefile | 5
-rw-r--r-- drivers/mfd/aat2870-core.c | 4
-rw-r--r-- drivers/mfd/as3722.c | 4
-rw-r--r-- drivers/mfd/bcm590xx.c | 66
-rw-r--r-- drivers/mfd/exynos-lpass.c | 31
-rw-r--r-- drivers/mfd/max14577.c | 1
-rw-r--r-- drivers/mfd/max77541.c | 2
-rw-r--r-- drivers/mfd/max77705.c | 4
-rw-r--r-- drivers/mfd/max8925-i2c.c | 1
-rw-r--r-- drivers/mfd/rohm-bd96801.c | 565
-rw-r--r-- drivers/mfd/rt5033.c | 6
-rw-r--r-- drivers/mfd/sec-acpm.c | 442
-rw-r--r-- drivers/mfd/sec-common.c | 301
-rw-r--r-- drivers/mfd/sec-core.c | 481
-rw-r--r-- drivers/mfd/sec-core.h | 23
-rw-r--r-- drivers/mfd/sec-i2c.c | 239
-rw-r--r-- drivers/mfd/sec-irq.c | 460
-rw-r--r-- drivers/mfd/sm501.c | 50
-rw-r--r-- drivers/mfd/sprd-sc27xx-spi.c | 5
-rw-r--r-- drivers/mfd/stm32-lptimer.c | 33
-rw-r--r-- drivers/mfd/stmpe-spi.c | 2
-rw-r--r-- drivers/mfd/tps65010.c | 9
-rw-r--r-- drivers/mfd/ucb1x00-core.c | 7
-rw-r--r-- drivers/mtd/devices/Kconfig | 2
-rw-r--r-- drivers/mtd/mtdchar.c | 2
-rw-r--r-- drivers/mtd/mtdcore.c | 152
-rw-r--r-- drivers/mtd/mtdcore.h | 2
-rw-r--r-- drivers/mtd/mtdpart.c | 16
-rw-r--r-- drivers/mtd/nand/ecc-mxic.c | 2
-rw-r--r-- drivers/mtd/nand/qpic_common.c | 8
-rw-r--r-- drivers/mtd/nand/raw/Kconfig | 9
-rw-r--r-- drivers/mtd/nand/raw/Makefile | 1
-rw-r--r-- drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c | 5
-rw-r--r-- drivers/mtd/nand/raw/brcmnand/brcmnand.c | 248
-rw-r--r-- drivers/mtd/nand/raw/denali_pci.c | 13
-rw-r--r-- drivers/mtd/nand/raw/loongson1-nand-controller.c | 836
-rw-r--r-- drivers/mtd/nand/raw/qcom_nandc.c | 18
-rw-r--r-- drivers/mtd/nand/raw/sunxi_nand.c | 2
-rw-r--r-- drivers/mtd/nand/spi/alliancememory.c | 20
-rw-r--r-- drivers/mtd/nand/spi/ato.c | 14
-rw-r--r-- drivers/mtd/nand/spi/core.c | 20
-rw-r--r-- drivers/mtd/nand/spi/esmt.c | 22
-rw-r--r-- drivers/mtd/nand/spi/foresee.c | 16
-rw-r--r-- drivers/mtd/nand/spi/gigadevice.c | 60
-rw-r--r-- drivers/mtd/nand/spi/macronix.c | 20
-rw-r--r-- drivers/mtd/nand/spi/micron.c | 38
-rw-r--r-- drivers/mtd/nand/spi/paragon.c | 20
-rw-r--r-- drivers/mtd/nand/spi/skyhigh.c | 20
-rw-r--r-- drivers/mtd/nand/spi/toshiba.c | 22
-rw-r--r-- drivers/mtd/nand/spi/winbond.c | 128
-rw-r--r-- drivers/mtd/nand/spi/xtx.c | 20
-rw-r--r-- drivers/mtd/spi-nor/macronix.c | 73
-rw-r--r-- drivers/net/ethernet/broadcom/Kconfig | 1
-rw-r--r-- drivers/net/wireless/ath/ath11k/Kconfig | 2
-rw-r--r-- drivers/net/wireless/ath/ath12k/Kconfig | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mld/d3.c | 2
-rw-r--r-- drivers/pci/Kconfig | 1
-rw-r--r-- drivers/pci/bus.c | 4
-rw-r--r-- drivers/pci/controller/Kconfig | 8
-rw-r--r-- drivers/pci/controller/cadence/Kconfig | 16
-rw-r--r-- drivers/pci/controller/cadence/pci-j721e.c | 40
-rw-r--r-- drivers/pci/controller/cadence/pcie-cadence-ep.c | 36
-rw-r--r-- drivers/pci/controller/cadence/pcie-cadence-host.c | 124
-rw-r--r-- drivers/pci/controller/cadence/pcie-cadence.c | 12
-rw-r--r-- drivers/pci/controller/cadence/pcie-cadence.h | 25
-rw-r--r-- drivers/pci/controller/dwc/pci-dra7xx.c | 4
-rw-r--r-- drivers/pci/controller/dwc/pci-exynos.c | 4
-rw-r--r-- drivers/pci/controller/dwc/pci-imx6.c | 213
-rw-r--r-- drivers/pci/controller/dwc/pci-keystone.c | 5
-rw-r--r-- drivers/pci/controller/dwc/pci-meson.c | 6
-rw-r--r-- drivers/pci/controller/dwc/pcie-armada8k.c | 6
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware-debugfs.c | 252
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware-ep.c | 30
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware-host.c | 81
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware.c | 29
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware.h | 32
-rw-r--r-- drivers/pci/controller/dwc/pcie-dw-rockchip.c | 102
-rw-r--r-- drivers/pci/controller/dwc/pcie-hisi.c | 1
-rw-r--r-- drivers/pci/controller/dwc/pcie-histb.c | 9
-rw-r--r-- drivers/pci/controller/dwc/pcie-keembay.c | 2
-rw-r--r-- drivers/pci/controller/dwc/pcie-kirin.c | 7
-rw-r--r-- drivers/pci/controller/dwc/pcie-qcom-ep.c | 10
-rw-r--r-- drivers/pci/controller/dwc/pcie-qcom.c | 7
-rw-r--r-- drivers/pci/controller/dwc/pcie-rcar-gen4.c | 3
-rw-r--r-- drivers/pci/controller/dwc/pcie-spear13xx.c | 7
-rw-r--r-- drivers/pci/controller/dwc/pcie-tegra194.c | 23
-rw-r--r-- drivers/pci/controller/dwc/pcie-uniphier.c | 2
-rw-r--r-- drivers/pci/controller/dwc/pcie-visconti.c | 4
-rw-r--r-- drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c | 12
-rw-r--r-- drivers/pci/controller/mobiveil/pcie-mobiveil.h | 2
-rw-r--r-- drivers/pci/controller/pci-host-common.c | 30
-rw-r--r-- drivers/pci/controller/pci-host-common.h | 20
-rw-r--r-- drivers/pci/controller/pci-host-generic.c | 2
-rw-r--r-- drivers/pci/controller/pci-hyperv.c | 99
-rw-r--r-- drivers/pci/controller/pci-mvebu.c | 26
-rw-r--r-- drivers/pci/controller/pci-thunder-ecam.c | 2
-rw-r--r-- drivers/pci/controller/pci-thunder-pem.c | 1
-rw-r--r-- drivers/pci/controller/pcie-apple.c | 243
-rw-r--r-- drivers/pci/controller/pcie-rcar-ep.c | 8
-rw-r--r-- drivers/pci/controller/pcie-rockchip-ep.c | 10
-rw-r--r-- drivers/pci/controller/pcie-rockchip.h | 7
-rw-r--r-- drivers/pci/controller/plda/pcie-microchip-host.c | 1
-rw-r--r-- drivers/pci/devres.c | 215
-rw-r--r-- drivers/pci/ecam.c | 2
-rw-r--r-- drivers/pci/endpoint/functions/pci-epf-vntb.c | 26
-rw-r--r-- drivers/pci/endpoint/pci-epc-core.c | 26
-rw-r--r-- drivers/pci/endpoint/pci-epf-core.c | 22
-rw-r--r-- drivers/pci/hotplug/pci_hotplug_core.c | 73
-rw-r--r-- drivers/pci/hotplug/pciehp.h | 1
-rw-r--r-- drivers/pci/hotplug/pciehp_core.c | 29
-rw-r--r-- drivers/pci/hotplug/pciehp_ctrl.c | 2
-rw-r--r-- drivers/pci/hotplug/pciehp_hpc.c | 78
-rw-r--r-- drivers/pci/iomap.c | 16
-rw-r--r-- drivers/pci/of.c | 44
-rw-r--r-- drivers/pci/pci-acpi.c | 23
-rw-r--r-- drivers/pci/pci-driver.c | 8
-rw-r--r-- drivers/pci/pci-sysfs.c | 4
-rw-r--r-- drivers/pci/pci.c | 88
-rw-r--r-- drivers/pci/pci.h | 75
-rw-r--r-- drivers/pci/pcie/aer.c | 442
-rw-r--r-- drivers/pci/pcie/bwctrl.c | 86
-rw-r--r-- drivers/pci/pcie/dpc.c | 75
-rw-r--r-- drivers/pci/pcie/err.c | 1
-rw-r--r-- drivers/pci/pcie/ptm.c | 300
-rw-r--r-- drivers/pci/pcie/tlp.c | 6
-rw-r--r-- drivers/pci/probe.c | 3
-rw-r--r-- drivers/pci/pwrctrl/Kconfig | 22
-rw-r--r-- drivers/pci/pwrctrl/Makefile | 8
-rw-r--r-- drivers/pci/pwrctrl/core.c | 2
-rw-r--r-- drivers/pci/quirks.c | 33
-rw-r--r-- drivers/pci/setup-bus.c | 16
-rw-r--r-- drivers/pcmcia/cardbus.c | 1
-rw-r--r-- drivers/pwm/pwm-stm32-lp.c | 219
-rw-r--r-- drivers/regulator/bcm590xx-regulator.c | 1289
-rw-r--r-- drivers/regulator/bd96801-regulator.c | 455
-rw-r--r-- drivers/remoteproc/Makefile | 6
-rw-r--r-- drivers/remoteproc/imx_dsp_rproc.c | 98
-rw-r--r-- drivers/remoteproc/qcom_wcnss_iris.c | 2
-rw-r--r-- drivers/remoteproc/remoteproc_core.c | 7
-rw-r--r-- drivers/remoteproc/stm32_rproc.c | 8
-rw-r--r-- drivers/remoteproc/ti_k3_common.c | 551
-rw-r--r-- drivers/remoteproc/ti_k3_common.h | 118
-rw-r--r-- drivers/remoteproc/ti_k3_dsp_remoteproc.c | 616
-rw-r--r-- drivers/remoteproc/ti_k3_m4_remoteproc.c | 583
-rw-r--r-- drivers/remoteproc/ti_k3_r5_remoteproc.c | 1018
-rw-r--r-- drivers/remoteproc/xlnx_r5_remoteproc.c | 34
-rw-r--r-- drivers/rpmsg/qcom_smd.c | 10
-rw-r--r-- drivers/rpmsg/rpmsg_core.c | 63
-rw-r--r-- drivers/rpmsg/rpmsg_internal.h | 6
-rw-r--r-- drivers/rpmsg/virtio_rpmsg_bus.c | 24
-rw-r--r-- drivers/scsi/bnx2fc/Kconfig | 1
-rw-r--r-- drivers/scsi/bnx2i/Kconfig | 1
-rw-r--r-- drivers/spi/spi-qpic-snand.c | 1
-rw-r--r-- drivers/uio/uio_hv_generic.c | 7
-rw-r--r-- drivers/usb/gadget/function/f_hid.c | 12
-rw-r--r-- drivers/usb/typec/tcpm/tcpm.c | 6
-rw-r--r-- drivers/vfio/pci/Kconfig | 2
-rw-r--r-- drivers/video/backlight/backlight.c | 93
-rw-r--r-- drivers/video/backlight/lcd.c | 108
-rw-r--r-- drivers/video/backlight/qcom-wled.c | 6
-rw-r--r-- drivers/video/fbdev/core/fb_backlight.c | 12
-rw-r--r-- drivers/video/fbdev/core/fb_info.c | 1
-rw-r--r-- drivers/video/fbdev/core/fbmem.c | 82
-rw-r--r-- drivers/video/fbdev/core/fbsysfs.c | 8
-rw-r--r-- drivers/watchdog/Kconfig | 26
-rw-r--r-- drivers/watchdog/Makefile | 2
-rw-r--r-- drivers/watchdog/apple_wdt.c | 7
-rw-r--r-- drivers/watchdog/arm_smc_wdt.c | 17
-rw-r--r-- drivers/watchdog/cros_ec_wdt.c | 30
-rw-r--r-- drivers/watchdog/da9052_wdt.c | 27
-rw-r--r-- drivers/watchdog/iTCO_wdt.c | 25
-rw-r--r-- drivers/watchdog/intel_oc_wdt.c | 233
-rw-r--r-- drivers/watchdog/lenovo_se30_wdt.c | 2
-rw-r--r-- drivers/watchdog/pcwd_usb.c | 6
-rw-r--r-- drivers/watchdog/pretimeout_noop.c | 2
-rw-r--r-- drivers/watchdog/pretimeout_panic.c | 2
-rw-r--r-- drivers/watchdog/qcom-wdt.c | 7
-rw-r--r-- drivers/watchdog/s32g_wdt.c | 315
-rw-r--r-- drivers/watchdog/s3c2410_wdt.c | 39
-rw-r--r-- drivers/watchdog/stm32_iwdg.c | 2
-rw-r--r-- drivers/watchdog/wdt_pci.c | 2
352 files changed, 16388 insertions, 6471 deletions
diff --git a/drivers/accel/qaic/Kconfig b/drivers/accel/qaic/Kconfig
index a9f866230058..5e405a19c157 100644
--- a/drivers/accel/qaic/Kconfig
+++ b/drivers/accel/qaic/Kconfig
@@ -8,7 +8,6 @@ config DRM_ACCEL_QAIC
depends on DRM_ACCEL
depends on PCI && HAS_IOMEM
depends on MHI_BUS
- depends on MMU
select CRC32
help
Enables driver for Qualcomm's Cloud AI accelerator PCIe cards that are
diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c
index 1687483ff319..76a856c32c4d 100644
--- a/drivers/acpi/irq.c
+++ b/drivers/acpi/irq.c
@@ -12,7 +12,7 @@
enum acpi_irq_model_id acpi_irq_model;
-static struct fwnode_handle *(*acpi_get_gsi_domain_id)(u32 gsi);
+static acpi_gsi_domain_disp_fn acpi_get_gsi_domain_id;
static u32 (*acpi_gsi_to_irq_fallback)(u32 gsi);
/**
@@ -307,12 +307,24 @@ EXPORT_SYMBOL_GPL(acpi_irq_get);
* for a given GSI
*/
void __init acpi_set_irq_model(enum acpi_irq_model_id model,
- struct fwnode_handle *(*fn)(u32))
+ acpi_gsi_domain_disp_fn fn)
{
acpi_irq_model = model;
acpi_get_gsi_domain_id = fn;
}
+/*
+ * acpi_get_gsi_dispatcher() - Get the GSI dispatcher function
+ *
+ * Return the dispatcher function that computes the domain fwnode for
+ * a given GSI.
+ */
+acpi_gsi_domain_disp_fn acpi_get_gsi_dispatcher(void)
+{
+ return acpi_get_gsi_domain_id;
+}
+EXPORT_SYMBOL_GPL(acpi_get_gsi_dispatcher);
+
/**
* acpi_set_gsi_to_irq_fallback - Register a GSI transfer
* callback to fallback to arch specified implementation.
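For context, a minimal sketch (not part of this patch) of how an irqchip consumer might use the new accessor to resolve the IRQ domain behind a GSI. example_gsi_to_domain() is a hypothetical helper; irq_find_matching_fwnode() and DOMAIN_BUS_ANY are existing kernel APIs from <linux/irqdomain.h>:

static struct irq_domain *example_gsi_to_domain(u32 gsi)
{
	acpi_gsi_domain_disp_fn fn = acpi_get_gsi_dispatcher();

	if (!fn)
		return NULL;

	/* fn() returns the fwnode of the domain handling this GSI */
	return irq_find_matching_fwnode(fn(gsi), DOMAIN_BUS_ANY);
}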
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 773799cfd443..79b20da0a256 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -6682,12 +6682,6 @@ const struct ata_port_info ata_dummy_port_info = {
};
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
-void ata_print_version(const struct device *dev, const char *version)
-{
- dev_printk(KERN_DEBUG, dev, "version %s\n", version);
-}
-EXPORT_SYMBOL(ata_print_version);
-
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_tf_load);
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_exec_command);
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_setup);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index b990c1ee0b12..c11d8e634bf7 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -3432,7 +3432,7 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
struct ata_eh_context *ehc = &link->eh_context;
struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
enum ata_lpm_policy old_policy = link->lpm_policy;
- bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM;
+ bool host_has_dipm = !(link->ap->flags & ATA_FLAG_NO_DIPM);
unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
unsigned int err_mask;
int rc;
@@ -3443,28 +3443,35 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
return 0;
/*
- * DIPM is enabled only for MIN_POWER as some devices
- * misbehave when the host NACKs transition to SLUMBER. Order
- * device and link configurations such that the host always
- * allows DIPM requests.
+ * This function currently assumes that it will never be supplied policy
+ * ATA_LPM_UNKNOWN.
+ */
+ if (WARN_ON_ONCE(policy == ATA_LPM_UNKNOWN))
+ return 0;
+
+ /*
+ * DIPM is enabled only for ATA_LPM_MIN_POWER,
+ * ATA_LPM_MIN_POWER_WITH_PARTIAL, and ATA_LPM_MED_POWER_WITH_DIPM, as
+ * some devices misbehave when the host NACKs transition to SLUMBER.
*/
ata_for_each_dev(dev, link, ENABLED) {
- bool hipm = ata_id_has_hipm(dev->id);
- bool dipm = ata_id_has_dipm(dev->id) && !no_dipm;
+ bool dev_has_hipm = ata_id_has_hipm(dev->id);
+ bool dev_has_dipm = ata_id_has_dipm(dev->id);
/* find the first enabled and LPM enabled devices */
if (!link_dev)
link_dev = dev;
- if (!lpm_dev && (hipm || dipm))
+ if (!lpm_dev &&
+ (dev_has_hipm || (dev_has_dipm && host_has_dipm)))
lpm_dev = dev;
hints &= ~ATA_LPM_EMPTY;
- if (!hipm)
+ if (!dev_has_hipm)
hints &= ~ATA_LPM_HIPM;
/* disable DIPM before changing link config */
- if (policy < ATA_LPM_MED_POWER_WITH_DIPM && dipm) {
+ if (dev_has_dipm) {
err_mask = ata_dev_set_feature(dev,
SETFEATURES_SATA_DISABLE, SATA_DIPM);
if (err_mask && err_mask != AC_ERR_DEV) {
@@ -3505,10 +3512,16 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
if (ap && ap->slave_link)
ap->slave_link->lpm_policy = policy;
- /* host config updated, enable DIPM if transitioning to MIN_POWER */
+ /*
+ * Host config updated, enable DIPM if transitioning to
+ * ATA_LPM_MIN_POWER, ATA_LPM_MIN_POWER_WITH_PARTIAL, or
+ * ATA_LPM_MED_POWER_WITH_DIPM.
+ */
ata_for_each_dev(dev, link, ENABLED) {
- if (policy >= ATA_LPM_MED_POWER_WITH_DIPM && !no_dipm &&
- ata_id_has_dipm(dev->id)) {
+ bool dev_has_dipm = ata_id_has_dipm(dev->id);
+
+ if (policy >= ATA_LPM_MED_POWER_WITH_DIPM && host_has_dipm &&
+ dev_has_dipm) {
err_mask = ata_dev_set_feature(dev,
SETFEATURES_SATA_ENABLE, SATA_DIPM);
if (err_mask && err_mask != AC_ERR_DEV) {
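For reference, the comparisons above (policy >= ATA_LPM_MED_POWER_WITH_DIPM) rely on the ordering of enum ata_lpm_policy in include/linux/libata.h:

enum ata_lpm_policy {
	ATA_LPM_UNKNOWN,
	ATA_LPM_MAX_POWER,
	ATA_LPM_MED_POWER,
	ATA_LPM_MED_POWER_WITH_DIPM,
	ATA_LPM_MIN_POWER_WITH_PARTIAL,
	ATA_LPM_MIN_POWER,
};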
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index 2e4463d3a356..cb46ce276bb1 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -1509,9 +1509,10 @@ int ata_eh_get_ncq_success_sense(struct ata_link *link)
struct ata_queued_cmd *qc;
unsigned int err_mask, tag;
u8 *sense, sk = 0, asc = 0, ascq = 0;
- u64 sense_valid, val;
u16 extended_sense;
bool aux_icc_valid;
+ u32 sense_valid;
+ u64 val;
int ret = 0;
err_mask = ata_read_log_page(dev, ATA_LOG_SENSE_NCQ, 0, buf, 2);
@@ -1529,8 +1530,7 @@ int ata_eh_get_ncq_success_sense(struct ata_link *link)
return -EIO;
}
- sense_valid = (u64)buf[8] | ((u64)buf[9] << 8) |
- ((u64)buf[10] << 16) | ((u64)buf[11] << 24);
+ sense_valid = get_unaligned_le32(&buf[8]);
extended_sense = get_unaligned_le16(&buf[14]);
aux_icc_valid = extended_sense & BIT(15);
@@ -1545,7 +1545,7 @@ int ata_eh_get_ncq_success_sense(struct ata_link *link)
* If the command does not have any sense data, clear ATA_SENSE.
* Keep ATA_QCFLAG_EH_SUCCESS_CMD so that command is finished.
*/
- if (!(sense_valid & (1ULL << tag))) {
+ if (!(sense_valid & BIT(tag))) {
qc->result_tf.status &= ~ATA_SENSE;
continue;
}
@@ -1634,7 +1634,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
return;
}
- if (!(link->sactive & (1 << tag))) {
+ if (!(link->sactive & BIT(tag))) {
ata_link_err(link, "log page 10h reported inactive tag %d\n",
tag);
return;
@@ -1659,8 +1659,6 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
if (ata_scsi_sense_is_valid(sense_key, asc, ascq)) {
ata_scsi_set_sense(dev, qc->scsicmd, sense_key, asc,
ascq);
- ata_scsi_set_sense_information(dev, qc->scsicmd,
- &qc->result_tf);
qc->flags |= ATA_QCFLAG_SENSE_VALID;
}
}
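As a standalone sketch, this is the byte layout that get_unaligned_le32(&buf[8]) decodes above; NCQ exposes at most 32 tags, so a u32 bitmap tested with BIT(tag) is wide enough:

/* Open-coded equivalent of get_unaligned_le32(&buf[8]): bytes 8..11, LSB first. */
static u32 ncq_sense_valid(const u8 *buf)
{
	return (u32)buf[8] | ((u32)buf[9] << 8) |
	       ((u32)buf[10] << 16) | ((u32)buf[11] << 24);
}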
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index c0eb8c67a9ff..a21c9895408d 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -216,17 +216,21 @@ void ata_scsi_set_sense(struct ata_device *dev, struct scsi_cmnd *cmd,
scsi_build_sense(cmd, d_sense, sk, asc, ascq);
}
-void ata_scsi_set_sense_information(struct ata_device *dev,
- struct scsi_cmnd *cmd,
- const struct ata_taskfile *tf)
+static void ata_scsi_set_sense_information(struct ata_queued_cmd *qc)
{
u64 information;
- information = ata_tf_read_block(tf, dev);
+ if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
+ ata_dev_dbg(qc->dev,
+ "missing result TF: can't set INFORMATION sense field\n");
+ return;
+ }
+
+ information = ata_tf_read_block(&qc->result_tf, qc->dev);
if (information == U64_MAX)
return;
- scsi_set_sense_information(cmd->sense_buffer,
+ scsi_set_sense_information(qc->scsicmd->sense_buffer,
SCSI_SENSE_BUFFERSIZE, information);
}
@@ -971,8 +975,7 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
* ata_gen_ata_sense - generate a SCSI fixed sense block
* @qc: Command that we are erroring out
*
- * Generate sense block for a failed ATA command @qc. Descriptor
- * format is used to accommodate LBA48 block address.
+ * Generate sense block for a failed ATA command @qc.
*
* LOCKING:
* None.
@@ -982,8 +985,6 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
struct ata_device *dev = qc->dev;
struct scsi_cmnd *cmd = qc->scsicmd;
struct ata_taskfile *tf = &qc->result_tf;
- unsigned char *sb = cmd->sense_buffer;
- u64 block;
u8 sense_key, asc, ascq;
if (ata_dev_disabled(dev)) {
@@ -1014,12 +1015,6 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0);
return;
}
-
- block = ata_tf_read_block(&qc->result_tf, dev);
- if (block == U64_MAX)
- return;
-
- scsi_set_sense_information(sb, SCSI_SENSE_BUFFERSIZE, block);
}
void ata_scsi_sdev_config(struct scsi_device *sdev)
@@ -1679,8 +1674,10 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
ata_scsi_set_passthru_sense_fields(qc);
if (is_ck_cond_request)
set_status_byte(qc->scsicmd, SAM_STAT_CHECK_CONDITION);
- } else if (is_error && !have_sense) {
- ata_gen_ata_sense(qc);
+ } else if (is_error) {
+ if (!have_sense)
+ ata_gen_ata_sense(qc);
+ ata_scsi_set_sense_information(qc);
}
ata_qc_done(qc);
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 0337be4faec7..ce5c628fa6fd 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -141,9 +141,6 @@ extern int ata_scsi_offline_dev(struct ata_device *dev);
extern bool ata_scsi_sense_is_valid(u8 sk, u8 asc, u8 ascq);
extern void ata_scsi_set_sense(struct ata_device *dev,
struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq);
-extern void ata_scsi_set_sense_information(struct ata_device *dev,
- struct scsi_cmnd *cmd,
- const struct ata_taskfile *tf);
extern void ata_scsi_media_change_notify(struct ata_device *dev);
extern void ata_scsi_hotplug(struct work_struct *work);
extern void ata_scsi_dev_rescan(struct work_struct *work);
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index c3042eca6332..f7f5131af937 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -1301,32 +1301,32 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
}
if (dimm_test) {
- u8 test_parttern1[40] =
+ u8 test_pattern1[40] =
{0x55,0xAA,'P','r','o','m','i','s','e',' ',
'N','o','t',' ','Y','e','t',' ',
'D','e','f','i','n','e','d',' ',
'1','.','1','0',
'9','8','0','3','1','6','1','2',0,0};
- u8 test_parttern2[40] = {0};
+ u8 test_pattern2[40] = {0};
- pdc20621_put_to_dimm(host, test_parttern2, 0x10040, 40);
- pdc20621_put_to_dimm(host, test_parttern2, 0x40, 40);
+ pdc20621_put_to_dimm(host, test_pattern2, 0x10040, 40);
+ pdc20621_put_to_dimm(host, test_pattern2, 0x40, 40);
- pdc20621_put_to_dimm(host, test_parttern1, 0x10040, 40);
- pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
- dev_info(host->dev, "DIMM test pattern 1: %x, %x, %s\n", test_parttern2[0],
- test_parttern2[1], &(test_parttern2[2]));
- pdc20621_get_from_dimm(host, test_parttern2, 0x10040,
+ pdc20621_put_to_dimm(host, test_pattern1, 0x10040, 40);
+ pdc20621_get_from_dimm(host, test_pattern2, 0x40, 40);
+ dev_info(host->dev, "DIMM test pattern 1: %x, %x, %s\n", test_pattern2[0],
+ test_pattern2[1], &(test_pattern2[2]));
+ pdc20621_get_from_dimm(host, test_pattern2, 0x10040,
40);
dev_info(host->dev, "DIMM test pattern 2: %x, %x, %s\n",
- test_parttern2[0],
- test_parttern2[1], &(test_parttern2[2]));
+ test_pattern2[0],
+ test_pattern2[1], &(test_pattern2[2]));
- pdc20621_put_to_dimm(host, test_parttern1, 0x40, 40);
- pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
+ pdc20621_put_to_dimm(host, test_pattern1, 0x40, 40);
+ pdc20621_get_from_dimm(host, test_pattern2, 0x40, 40);
dev_info(host->dev, "DIMM test pattern 3: %x, %x, %s\n",
- test_parttern2[0],
- test_parttern2[1], &(test_parttern2[2]));
+ test_pattern2[0],
+ test_pattern2[1], &(test_pattern2[2]));
}
/* ECC initiliazation. */
diff --git a/drivers/base/property.c b/drivers/base/property.c
index c1392743df9c..805f75b35115 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -928,22 +928,22 @@ bool fwnode_device_is_available(const struct fwnode_handle *fwnode)
EXPORT_SYMBOL_GPL(fwnode_device_is_available);
/**
- * device_get_child_node_count - return the number of child nodes for device
- * @dev: Device to count the child nodes for
+ * fwnode_get_child_node_count - return the number of child nodes for a given firmware node
+ * @fwnode: Pointer to the parent firmware node
*
- * Return: the number of child nodes for a given device.
+ * Return: the number of child nodes for a given firmware node.
*/
-unsigned int device_get_child_node_count(const struct device *dev)
+unsigned int fwnode_get_child_node_count(const struct fwnode_handle *fwnode)
{
struct fwnode_handle *child;
unsigned int count = 0;
- device_for_each_child_node(dev, child)
+ fwnode_for_each_child_node(fwnode, child)
count++;
return count;
}
-EXPORT_SYMBOL_GPL(device_get_child_node_count);
+EXPORT_SYMBOL_GPL(fwnode_get_child_node_count);
bool device_dma_supported(const struct device *dev)
{
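Callers that previously counted the children of a device can recover the old behavior through dev_fwnode(); a minimal sketch with a hypothetical wrapper:

static unsigned int example_device_child_count(const struct device *dev)
{
	return fwnode_get_child_node_count(dev_fwnode(dev));
}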
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 0d619df03fa9..66ce6b81c7d9 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3717,7 +3717,7 @@ static int mtip_pci_probe(struct pci_dev *pdev,
rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rv) {
dev_warn(&pdev->dev, "64-bit DMA enable failed\n");
- goto setmask_err;
+ goto iomap_err;
}
/* Copy the info we may need later into the private data structure. */
@@ -3733,7 +3733,7 @@ static int mtip_pci_probe(struct pci_dev *pdev,
if (!dd->isr_workq) {
dev_warn(&pdev->dev, "Can't create wq %d\n", dd->instance);
rv = -ENOMEM;
- goto setmask_err;
+ goto iomap_err;
}
memset(cpu_list, 0, sizeof(cpu_list));
@@ -3830,8 +3830,6 @@ msi_initialize_err:
drop_cpu(dd->work[1].cpu_binding);
drop_cpu(dd->work[2].cpu_binding);
}
-setmask_err:
- pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
iomap_err:
kfree(dd);
@@ -3907,7 +3905,6 @@ static void mtip_pci_remove(struct pci_dev *pdev)
pci_disable_msi(pdev);
- pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
pci_set_drvdata(pdev, NULL);
put_disk(dd->disk);
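The removals above lean on devres: pcim_iomap_regions() registers a managed mapping that is torn down automatically when the device is released, so the explicit pcim_iounmap_regions() calls, and the setmask_err label that existed only to reach them, are redundant.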
diff --git a/drivers/block/zram/backend_deflate.c b/drivers/block/zram/backend_deflate.c
index 0f7f252c12f4..b75016e0e654 100644
--- a/drivers/block/zram/backend_deflate.c
+++ b/drivers/block/zram/backend_deflate.c
@@ -8,7 +8,7 @@
#include "backend_deflate.h"
/* Use the same value as crypto API */
-#define DEFLATE_DEF_WINBITS 11
+#define DEFLATE_DEF_WINBITS (-11)
#define DEFLATE_DEF_MEMLEVEL MAX_MEM_LEVEL
struct deflate_ctx {
@@ -22,8 +22,10 @@ static void deflate_release_params(struct zcomp_params *params)
static int deflate_setup_params(struct zcomp_params *params)
{
- if (params->level == ZCOMP_PARAM_NO_LEVEL)
+ if (params->level == ZCOMP_PARAM_NOT_SET)
params->level = Z_DEFAULT_COMPRESSION;
+ if (params->deflate.winbits == ZCOMP_PARAM_NOT_SET)
+ params->deflate.winbits = DEFLATE_DEF_WINBITS;
return 0;
}
@@ -57,13 +59,13 @@ static int deflate_create(struct zcomp_params *params, struct zcomp_ctx *ctx)
return -ENOMEM;
ctx->context = zctx;
- sz = zlib_deflate_workspacesize(-DEFLATE_DEF_WINBITS, MAX_MEM_LEVEL);
+ sz = zlib_deflate_workspacesize(params->deflate.winbits, MAX_MEM_LEVEL);
zctx->cctx.workspace = vzalloc(sz);
if (!zctx->cctx.workspace)
goto error;
ret = zlib_deflateInit2(&zctx->cctx, params->level, Z_DEFLATED,
- -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL,
+ params->deflate.winbits, DEFLATE_DEF_MEMLEVEL,
Z_DEFAULT_STRATEGY);
if (ret != Z_OK)
goto error;
@@ -73,7 +75,7 @@ static int deflate_create(struct zcomp_params *params, struct zcomp_ctx *ctx)
if (!zctx->dctx.workspace)
goto error;
- ret = zlib_inflateInit2(&zctx->dctx, -DEFLATE_DEF_WINBITS);
+ ret = zlib_inflateInit2(&zctx->dctx, params->deflate.winbits);
if (ret != Z_OK)
goto error;
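For background on the sign flip: zlib treats a negative windowBits as selecting a raw deflate stream with no zlib header or checksum. Storing -11 in the parameter, rather than negating 11 at each call site, lets a user-supplied winbits value flow unmodified into zlib_deflateInit2() and zlib_inflateInit2().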
diff --git a/drivers/block/zram/backend_lz4.c b/drivers/block/zram/backend_lz4.c
index 847f3334eb38..daccd60857eb 100644
--- a/drivers/block/zram/backend_lz4.c
+++ b/drivers/block/zram/backend_lz4.c
@@ -18,7 +18,7 @@ static void lz4_release_params(struct zcomp_params *params)
static int lz4_setup_params(struct zcomp_params *params)
{
- if (params->level == ZCOMP_PARAM_NO_LEVEL)
+ if (params->level == ZCOMP_PARAM_NOT_SET)
params->level = LZ4_ACCELERATION_DEFAULT;
return 0;
diff --git a/drivers/block/zram/backend_lz4hc.c b/drivers/block/zram/backend_lz4hc.c
index 5f37d5abcaeb..9e8a35dfa56d 100644
--- a/drivers/block/zram/backend_lz4hc.c
+++ b/drivers/block/zram/backend_lz4hc.c
@@ -18,7 +18,7 @@ static void lz4hc_release_params(struct zcomp_params *params)
static int lz4hc_setup_params(struct zcomp_params *params)
{
- if (params->level == ZCOMP_PARAM_NO_LEVEL)
+ if (params->level == ZCOMP_PARAM_NOT_SET)
params->level = LZ4HC_DEFAULT_CLEVEL;
return 0;
diff --git a/drivers/block/zram/backend_zstd.c b/drivers/block/zram/backend_zstd.c
index 22c8067536f3..81defb98ed09 100644
--- a/drivers/block/zram/backend_zstd.c
+++ b/drivers/block/zram/backend_zstd.c
@@ -58,7 +58,7 @@ static int zstd_setup_params(struct zcomp_params *params)
return -ENOMEM;
params->drv_data = zp;
- if (params->level == ZCOMP_PARAM_NO_LEVEL)
+ if (params->level == ZCOMP_PARAM_NOT_SET)
params->level = zstd_default_clevel();
zp->cprm = zstd_get_params(params->level, PAGE_SIZE);
diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
index 25339ed1e07e..4acffe671a5e 100644
--- a/drivers/block/zram/zcomp.h
+++ b/drivers/block/zram/zcomp.h
@@ -5,7 +5,11 @@
#include <linux/mutex.h>
-#define ZCOMP_PARAM_NO_LEVEL INT_MIN
+#define ZCOMP_PARAM_NOT_SET INT_MIN
+
+struct deflate_params {
+ s32 winbits;
+};
/*
* Immutable driver (backend) parameters. The driver may attach private
@@ -17,6 +21,9 @@ struct zcomp_params {
void *dict;
size_t dict_sz;
s32 level;
+ union {
+ struct deflate_params deflate;
+ };
void *drv_data;
};
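A sketch of how a future backend would slot its knobs into the same union; struct zstd_params and its window_log member are hypothetical, shown only to illustrate the layout:

struct zstd_params {
	s32 window_log;		/* hypothetical */
};

union {
	struct deflate_params deflate;
	struct zstd_params zstd;	/* hypothetical */
};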
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 94e6e9b80bf0..54c57103715f 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1276,13 +1276,15 @@ static void comp_params_reset(struct zram *zram, u32 prio)
struct zcomp_params *params = &zram->params[prio];
vfree(params->dict);
- params->level = ZCOMP_PARAM_NO_LEVEL;
+ params->level = ZCOMP_PARAM_NOT_SET;
+ params->deflate.winbits = ZCOMP_PARAM_NOT_SET;
params->dict_sz = 0;
params->dict = NULL;
}
static int comp_params_store(struct zram *zram, u32 prio, s32 level,
- const char *dict_path)
+ const char *dict_path,
+ struct deflate_params *deflate_params)
{
ssize_t sz = 0;
@@ -1300,6 +1302,7 @@ static int comp_params_store(struct zram *zram, u32 prio, s32 level,
zram->params[prio].dict_sz = sz;
zram->params[prio].level = level;
+ zram->params[prio].deflate.winbits = deflate_params->winbits;
return 0;
}
@@ -1308,11 +1311,14 @@ static ssize_t algorithm_params_store(struct device *dev,
const char *buf,
size_t len)
{
- s32 prio = ZRAM_PRIMARY_COMP, level = ZCOMP_PARAM_NO_LEVEL;
+ s32 prio = ZRAM_PRIMARY_COMP, level = ZCOMP_PARAM_NOT_SET;
char *args, *param, *val, *algo = NULL, *dict_path = NULL;
+ struct deflate_params deflate_params;
struct zram *zram = dev_to_zram(dev);
int ret;
+ deflate_params.winbits = ZCOMP_PARAM_NOT_SET;
+
args = skip_spaces(buf);
while (*args) {
args = next_arg(args, &param, &val);
@@ -1343,6 +1349,13 @@ static ssize_t algorithm_params_store(struct device *dev,
dict_path = val;
continue;
}
+
+ if (!strcmp(param, "deflate.winbits")) {
+ ret = kstrtoint(val, 10, &deflate_params.winbits);
+ if (ret)
+ return ret;
+ continue;
+ }
}
/* Lookup priority by algorithm name */
@@ -1364,7 +1377,7 @@ static ssize_t algorithm_params_store(struct device *dev,
if (prio < ZRAM_PRIMARY_COMP || prio >= ZRAM_MAX_COMPS)
return -EINVAL;
- ret = comp_params_store(zram, prio, level, dict_path);
+ ret = comp_params_store(zram, prio, level, dict_path, &deflate_params);
return ret ? ret : len;
}
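A usage sketch for the new knob (device name and values are illustrative; priority= and algo= are the attribute's existing keys):

echo "priority=0 algo=deflate deflate.winbits=-12" > /sys/block/zram0/algorithm_params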
diff --git a/drivers/clocksource/timer-stm32-lp.c b/drivers/clocksource/timer-stm32-lp.c
index 928da2f6de69..6e7944ffd7c0 100644
--- a/drivers/clocksource/timer-stm32-lp.c
+++ b/drivers/clocksource/timer-stm32-lp.c
@@ -5,6 +5,7 @@
* Pascal Paillet <p.paillet@st.com> for STMicroelectronics.
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
@@ -27,6 +28,7 @@ struct stm32_lp_private {
u32 psc;
struct device *dev;
struct clk *clk;
+ u32 version;
};
static struct stm32_lp_private*
@@ -47,12 +49,46 @@ static int stm32_clkevent_lp_shutdown(struct clock_event_device *clkevt)
return 0;
}
-static int stm32_clkevent_lp_set_timer(unsigned long evt,
- struct clock_event_device *clkevt,
- int is_periodic)
+static int stm32mp25_clkevent_lp_set_evt(struct stm32_lp_private *priv, unsigned long evt)
{
- struct stm32_lp_private *priv = to_priv(clkevt);
+ int ret;
+ u32 val;
+
+ regmap_read(priv->reg, STM32_LPTIM_CR, &val);
+ if (!FIELD_GET(STM32_LPTIM_ENABLE, val)) {
+ /* Enable LPTIMER to be able to write into IER and ARR registers */
+ regmap_write(priv->reg, STM32_LPTIM_CR, STM32_LPTIM_ENABLE);
+ /*
+ * After setting the ENABLE bit, a delay of two counter clock cycles is needed
+ * before the LPTIM is actually enabled. At a 32 kHz rate this is approximately
+ * 62.5 microseconds, so round it up.
+ */
+ udelay(63);
+ }
+ /* set next event counter */
+ regmap_write(priv->reg, STM32_LPTIM_ARR, evt);
+ /* enable ARR interrupt */
+ regmap_write(priv->reg, STM32_LPTIM_IER, STM32_LPTIM_ARRMIE);
+
+ /* Poll DIEROK and ARROK to ensure register access has completed */
+ ret = regmap_read_poll_timeout_atomic(priv->reg, STM32_LPTIM_ISR, val,
+ (val & STM32_LPTIM_DIEROK_ARROK) ==
+ STM32_LPTIM_DIEROK_ARROK,
+ 10, 500);
+ if (ret) {
+ dev_err(priv->dev, "access to LPTIM timed out\n");
+ /* Disable LPTIMER */
+ regmap_write(priv->reg, STM32_LPTIM_CR, 0);
+ return ret;
+ }
+ /* Clear DIEROK and ARROK flags */
+ regmap_write(priv->reg, STM32_LPTIM_ICR, STM32_LPTIM_DIEROKCF_ARROKCF);
+ return 0;
+}
+
+static void stm32_clkevent_lp_set_evt(struct stm32_lp_private *priv, unsigned long evt)
+{
/* disable LPTIMER to be able to write into IER register*/
regmap_write(priv->reg, STM32_LPTIM_CR, 0);
/* enable ARR interrupt */
@@ -61,6 +97,22 @@ static int stm32_clkevent_lp_set_timer(unsigned long evt,
regmap_write(priv->reg, STM32_LPTIM_CR, STM32_LPTIM_ENABLE);
/* set next event counter */
regmap_write(priv->reg, STM32_LPTIM_ARR, evt);
+}
+
+static int stm32_clkevent_lp_set_timer(unsigned long evt,
+ struct clock_event_device *clkevt,
+ int is_periodic)
+{
+ struct stm32_lp_private *priv = to_priv(clkevt);
+ int ret;
+
+ if (priv->version == STM32_LPTIM_VERR_23) {
+ ret = stm32mp25_clkevent_lp_set_evt(priv, evt);
+ if (ret)
+ return ret;
+ } else {
+ stm32_clkevent_lp_set_evt(priv, evt);
+ }
/* start counter */
if (is_periodic)
@@ -176,6 +228,7 @@ static int stm32_clkevent_lp_probe(struct platform_device *pdev)
return -ENOMEM;
priv->reg = ddata->regmap;
+ priv->version = ddata->version;
priv->clk = ddata->clk;
ret = clk_prepare_enable(priv->clk);
if (ret)
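Sanity-checking the delay in the comment above: two counter clock cycles at 32 kHz take 2 / 32000 Hz = 62.5 µs, which udelay(63) rounds up to a whole microsecond.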
diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig
index cf1ba673b8c2..48b7314afdb8 100644
--- a/drivers/cxl/Kconfig
+++ b/drivers/cxl/Kconfig
@@ -114,6 +114,77 @@ config CXL_FEATURES
If unsure say 'n'
+config CXL_EDAC_MEM_FEATURES
+ bool "CXL: EDAC Memory Features"
+ depends on EXPERT
+ depends on CXL_MEM
+ depends on CXL_FEATURES
+ depends on EDAC >= CXL_BUS
+ help
+ The CXL EDAC memory feature is optional and allows the host to
+ control the EDAC memory features configurations of CXL memory
+ expander devices.
+
+ Say 'y' if you have an expert need to change default settings
+ of a memory RAS feature established by the platform/device.
+ Otherwise say 'n'.
+
+config CXL_EDAC_SCRUB
+ bool "Enable CXL Patrol Scrub Control (Patrol Read)"
+ depends on CXL_EDAC_MEM_FEATURES
+ depends on EDAC_SCRUB
+ help
+ The CXL EDAC scrub control is optional and allows the host to
+ control the scrub feature configurations of CXL memory expander
+ devices.
+
+ When enabled 'cxl_mem' and 'cxl_region' EDAC devices are
+ published with memory scrub control attributes as described by
+ Documentation/ABI/testing/sysfs-edac-scrub.
+
+ Say 'y' if you have an expert need to change default settings
+ of a memory scrub feature established by the platform/device
+ (e.g. scrub rates for the patrol scrub feature).
+ Otherwise say 'n'.
+
+config CXL_EDAC_ECS
+ bool "Enable CXL Error Check Scrub (Repair)"
+ depends on CXL_EDAC_MEM_FEATURES
+ depends on EDAC_ECS
+ help
+ The CXL EDAC ECS control is optional and allows the host to
+ control the ECS feature configurations of CXL memory expander
+ devices.
+
+ When enabled 'cxl_mem' EDAC devices are published with memory
+ ECS control attributes as described by
+ Documentation/ABI/testing/sysfs-edac-ecs.
+
+ Say 'y' if you have an expert need to change default settings
+ of a memory ECS feature established by the platform/device.
+ Otherwise say 'n'.
+
+config CXL_EDAC_MEM_REPAIR
+ bool "Enable CXL Memory Repair"
+ depends on CXL_EDAC_MEM_FEATURES
+ depends on EDAC_MEM_REPAIR
+ help
+ The CXL EDAC memory repair control is optional and allows the host
+ to control the memory repair features (e.g. sparing, PPR)
+ configurations of CXL memory expander devices.
+
+ When enabled, the memory repair feature requires approximately
+ 43KB of additional memory to store CXL DRAM and CXL general
+ media event records.
+
+ When enabled 'cxl_mem' EDAC devices are published with memory
+ repair control attributes as described by
+ Documentation/ABI/testing/sysfs-edac-memory-repair.
+
+ Say 'y' if you have an expert need to change default settings
+ of a memory repair feature established by the platform/device.
+ Otherwise say 'n'.
+
config CXL_PORT
default CXL_BUS
tristate
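On the unusual "depends on EDAC >= CXL_BUS" above: Kconfig compares tristates, so this requires the EDAC core to be at least as built-in as the CXL bus (EDAC=y whenever CXL_BUS=y, EDAC=y or m when CXL_BUS=m), keeping the EDAC symbols reachable at link time.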
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index cb14829bb9be..a1a99ec3f12c 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -11,8 +11,6 @@
#include "cxlpci.h"
#include "cxl.h"
-#define CXL_RCRB_SIZE SZ_8K
-
struct cxl_cxims_data {
int nr_maps;
u64 xormaps[] __counted_by(nr_maps);
@@ -421,7 +419,15 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
rc = cxl_decoder_add(cxld, target_map);
if (rc)
return rc;
- return cxl_root_decoder_autoremove(dev, no_free_ptr(cxlrd));
+
+ rc = cxl_root_decoder_autoremove(dev, no_free_ptr(cxlrd));
+ if (rc)
+ return rc;
+
+ dev_dbg(root_port->dev.parent, "%s added to %s\n",
+ dev_name(&cxld->dev), dev_name(&root_port->dev));
+
+ return 0;
}
static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
@@ -479,7 +485,11 @@ static int cxl_get_chbs_iter(union acpi_subtable_headers *header, void *arg,
chbs = (struct acpi_cedt_chbs *) header;
if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11 &&
- chbs->length != CXL_RCRB_SIZE)
+ chbs->length != ACPI_CEDT_CHBS_LENGTH_CXL11)
+ return 0;
+
+ if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL20 &&
+ chbs->length != ACPI_CEDT_CHBS_LENGTH_CXL20)
return 0;
if (!chbs->base)
@@ -739,10 +749,10 @@ static void remove_cxl_resources(void *data)
* expanding its boundaries to ensure that any conflicting resources become
* children. If a window is expanded it may then conflict with a another window
* entry and require the window to be truncated or trimmed. Consider this
- * situation:
+ * situation::
*
- * |-- "CXL Window 0" --||----- "CXL Window 1" -----|
- * |--------------- "System RAM" -------------|
+ * |-- "CXL Window 0" --||----- "CXL Window 1" -----|
+ * |--------------- "System RAM" -------------|
*
* ...where platform firmware has established as System RAM resource across 2
* windows, but has left some portion of window 1 for dynamic CXL region
diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile
index 086df97a0fcf..79e2ef81fde8 100644
--- a/drivers/cxl/core/Makefile
+++ b/drivers/cxl/core/Makefile
@@ -20,3 +20,4 @@ cxl_core-$(CONFIG_TRACING) += trace.o
cxl_core-$(CONFIG_CXL_REGION) += region.o
cxl_core-$(CONFIG_CXL_MCE) += mce.o
cxl_core-$(CONFIG_CXL_FEATURES) += features.o
+cxl_core-$(CONFIG_CXL_EDAC_MEM_FEATURES) += edac.o
diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
index edb4f41eeacc..0ccef2f2a26a 100644
--- a/drivers/cxl/core/cdat.c
+++ b/drivers/cxl/core/cdat.c
@@ -28,7 +28,7 @@ static u32 cdat_normalize(u16 entry, u64 base, u8 type)
*/
if (entry == 0xffff || !entry)
return 0;
- else if (base > (UINT_MAX / (entry)))
+ if (base > (UINT_MAX / (entry)))
return 0;
/*
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index 17b692eb3257..29b61828a847 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -76,7 +76,7 @@ void __iomem *devm_cxl_iomap_block(struct device *dev, resource_size_t addr,
struct dentry *cxl_debugfs_create_dir(const char *dir);
int cxl_dpa_set_part(struct cxl_endpoint_decoder *cxled,
enum cxl_partition_mode mode);
-int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size);
+int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size);
int cxl_dpa_free(struct cxl_endpoint_decoder *cxled);
resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled);
resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled);
@@ -124,6 +124,8 @@ int cxl_acpi_get_extended_linear_cache_size(struct resource *backing_res,
int nid, resource_size_t *size);
#ifdef CONFIG_CXL_FEATURES
+struct cxl_feat_entry *
+cxl_feature_info(struct cxl_features_state *cxlfs, const uuid_t *uuid);
size_t cxl_get_feature(struct cxl_mailbox *cxl_mbox, const uuid_t *feat_uuid,
enum cxl_get_feat_selection selection,
void *feat_out, size_t feat_out_size, u16 offset,
diff --git a/drivers/cxl/core/edac.c b/drivers/cxl/core/edac.c
new file mode 100644
index 000000000000..2cbc664e5d62
--- /dev/null
+++ b/drivers/cxl/core/edac.c
@@ -0,0 +1,2102 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CXL EDAC memory feature driver.
+ *
+ * Copyright (c) 2024-2025 HiSilicon Limited.
+ *
+ * - Supports functions to configure EDAC features of the
+ * CXL memory devices.
+ * - Registers with the EDAC device subsystem driver to expose
+ * the features sysfs attributes to the user for configuring
+ * CXL memory RAS feature.
+ */
+
+#include <linux/cleanup.h>
+#include <linux/edac.h>
+#include <linux/limits.h>
+#include <linux/unaligned.h>
+#include <linux/xarray.h>
+#include <cxl/features.h>
+#include <cxl.h>
+#include <cxlmem.h>
+#include "core.h"
+#include "trace.h"
+
+#define CXL_NR_EDAC_DEV_FEATURES 7
+
+#define CXL_SCRUB_NO_REGION -1
+
+struct cxl_patrol_scrub_context {
+ u8 instance;
+ u16 get_feat_size;
+ u16 set_feat_size;
+ u8 get_version;
+ u8 set_version;
+ u16 effects;
+ struct cxl_memdev *cxlmd;
+ struct cxl_region *cxlr;
+};
+
+/*
+ * See CXL spec rev 3.2 @8.2.10.9.11.1 Table 8-222 Device Patrol Scrub Control
+ * Feature Readable Attributes.
+ */
+struct cxl_scrub_rd_attrbs {
+ u8 scrub_cycle_cap;
+ __le16 scrub_cycle_hours;
+ u8 scrub_flags;
+} __packed;
+
+/*
+ * See CXL spec rev 3.2 @8.2.10.9.11.1 Table 8-223 Device Patrol Scrub Control
+ * Feature Writable Attributes.
+ */
+struct cxl_scrub_wr_attrbs {
+ u8 scrub_cycle_hours;
+ u8 scrub_flags;
+} __packed;
+
+#define CXL_SCRUB_CONTROL_CHANGEABLE BIT(0)
+#define CXL_SCRUB_CONTROL_REALTIME BIT(1)
+#define CXL_SCRUB_CONTROL_CYCLE_MASK GENMASK(7, 0)
+#define CXL_SCRUB_CONTROL_MIN_CYCLE_MASK GENMASK(15, 8)
+#define CXL_SCRUB_CONTROL_ENABLE BIT(0)
+
+#define CXL_GET_SCRUB_CYCLE_CHANGEABLE(cap) \
+ FIELD_GET(CXL_SCRUB_CONTROL_CHANGEABLE, cap)
+#define CXL_GET_SCRUB_CYCLE(cycle) \
+ FIELD_GET(CXL_SCRUB_CONTROL_CYCLE_MASK, cycle)
+#define CXL_GET_SCRUB_MIN_CYCLE(cycle) \
+ FIELD_GET(CXL_SCRUB_CONTROL_MIN_CYCLE_MASK, cycle)
+#define CXL_GET_SCRUB_EN_STS(flags) FIELD_GET(CXL_SCRUB_CONTROL_ENABLE, flags)
+
+#define CXL_SET_SCRUB_CYCLE(cycle) \
+ FIELD_PREP(CXL_SCRUB_CONTROL_CYCLE_MASK, cycle)
+#define CXL_SET_SCRUB_EN(en) FIELD_PREP(CXL_SCRUB_CONTROL_ENABLE, en)
+
+static int cxl_mem_scrub_get_attrbs(struct cxl_mailbox *cxl_mbox, u8 *cap,
+ u16 *cycle, u8 *flags, u8 *min_cycle)
+{
+ size_t rd_data_size = sizeof(struct cxl_scrub_rd_attrbs);
+ size_t data_size;
+ struct cxl_scrub_rd_attrbs *rd_attrbs __free(kfree) =
+ kzalloc(rd_data_size, GFP_KERNEL);
+ if (!rd_attrbs)
+ return -ENOMEM;
+
+ data_size = cxl_get_feature(cxl_mbox, &CXL_FEAT_PATROL_SCRUB_UUID,
+ CXL_GET_FEAT_SEL_CURRENT_VALUE, rd_attrbs,
+ rd_data_size, 0, NULL);
+ if (!data_size)
+ return -EIO;
+
+ *cap = rd_attrbs->scrub_cycle_cap;
+ *cycle = le16_to_cpu(rd_attrbs->scrub_cycle_hours);
+ *flags = rd_attrbs->scrub_flags;
+ if (min_cycle)
+ *min_cycle = CXL_GET_SCRUB_MIN_CYCLE(*cycle);
+
+ return 0;
+}
+
+static int cxl_scrub_get_attrbs(struct cxl_patrol_scrub_context *cxl_ps_ctx,
+ u8 *cap, u16 *cycle, u8 *flags, u8 *min_cycle)
+{
+ struct cxl_mailbox *cxl_mbox;
+ u8 min_scrub_cycle = U8_MAX;
+ struct cxl_region_params *p;
+ struct cxl_memdev *cxlmd;
+ struct cxl_region *cxlr;
+ int i, ret;
+
+ if (!cxl_ps_ctx->cxlr) {
+ cxl_mbox = &cxl_ps_ctx->cxlmd->cxlds->cxl_mbox;
+ return cxl_mem_scrub_get_attrbs(cxl_mbox, cap, cycle,
+ flags, min_cycle);
+ }
+
+ struct rw_semaphore *region_lock __free(rwsem_read_release) =
+ rwsem_read_intr_acquire(&cxl_region_rwsem);
+ if (!region_lock)
+ return -EINTR;
+
+ cxlr = cxl_ps_ctx->cxlr;
+ p = &cxlr->params;
+
+ for (i = 0; i < p->nr_targets; i++) {
+ struct cxl_endpoint_decoder *cxled = p->targets[i];
+
+ cxlmd = cxled_to_memdev(cxled);
+ cxl_mbox = &cxlmd->cxlds->cxl_mbox;
+ ret = cxl_mem_scrub_get_attrbs(cxl_mbox, cap, cycle, flags,
+ min_cycle);
+ if (ret)
+ return ret;
+
+ if (min_cycle)
+ min_scrub_cycle = min(*min_cycle, min_scrub_cycle);
+ }
+
+ if (min_cycle)
+ *min_cycle = min_scrub_cycle;
+
+ return 0;
+}
+
+static int cxl_scrub_set_attrbs_region(struct device *dev,
+ struct cxl_patrol_scrub_context *cxl_ps_ctx,
+ u8 cycle, u8 flags)
+{
+ struct cxl_scrub_wr_attrbs wr_attrbs;
+ struct cxl_mailbox *cxl_mbox;
+ struct cxl_region_params *p;
+ struct cxl_memdev *cxlmd;
+ struct cxl_region *cxlr;
+ int ret, i;
+
+ struct rw_semaphore *region_lock __free(rwsem_read_release) =
+ rwsem_read_intr_acquire(&cxl_region_rwsem);
+ if (!region_lock)
+ return -EINTR;
+
+ cxlr = cxl_ps_ctx->cxlr;
+ p = &cxlr->params;
+ wr_attrbs.scrub_cycle_hours = cycle;
+ wr_attrbs.scrub_flags = flags;
+
+ for (i = 0; i < p->nr_targets; i++) {
+ struct cxl_endpoint_decoder *cxled = p->targets[i];
+
+ cxlmd = cxled_to_memdev(cxled);
+ cxl_mbox = &cxlmd->cxlds->cxl_mbox;
+ ret = cxl_set_feature(cxl_mbox, &CXL_FEAT_PATROL_SCRUB_UUID,
+ cxl_ps_ctx->set_version, &wr_attrbs,
+ sizeof(wr_attrbs),
+ CXL_SET_FEAT_FLAG_DATA_SAVED_ACROSS_RESET,
+ 0, NULL);
+ if (ret)
+ return ret;
+
+ if (cycle != cxlmd->scrub_cycle) {
+ if (cxlmd->scrub_region_id != CXL_SCRUB_NO_REGION)
+ dev_info(dev,
+ "Device scrub rate(%d hours) set by region%d rate overwritten by region%d scrub rate(%d hours)\n",
+ cxlmd->scrub_cycle,
+ cxlmd->scrub_region_id, cxlr->id,
+ cycle);
+
+ cxlmd->scrub_cycle = cycle;
+ cxlmd->scrub_region_id = cxlr->id;
+ }
+ }
+
+ return 0;
+}
+
+static int cxl_scrub_set_attrbs_device(struct device *dev,
+ struct cxl_patrol_scrub_context *cxl_ps_ctx,
+ u8 cycle, u8 flags)
+{
+ struct cxl_scrub_wr_attrbs wr_attrbs;
+ struct cxl_mailbox *cxl_mbox;
+ struct cxl_memdev *cxlmd;
+ int ret;
+
+ wr_attrbs.scrub_cycle_hours = cycle;
+ wr_attrbs.scrub_flags = flags;
+
+ cxlmd = cxl_ps_ctx->cxlmd;
+ cxl_mbox = &cxlmd->cxlds->cxl_mbox;
+ ret = cxl_set_feature(cxl_mbox, &CXL_FEAT_PATROL_SCRUB_UUID,
+ cxl_ps_ctx->set_version, &wr_attrbs,
+ sizeof(wr_attrbs),
+ CXL_SET_FEAT_FLAG_DATA_SAVED_ACROSS_RESET, 0,
+ NULL);
+ if (ret)
+ return ret;
+
+ if (cycle != cxlmd->scrub_cycle) {
+ if (cxlmd->scrub_region_id != CXL_SCRUB_NO_REGION)
+ dev_info(dev,
+ "Device scrub rate(%d hours) set by region%d rate overwritten with device local scrub rate(%d hours)\n",
+ cxlmd->scrub_cycle, cxlmd->scrub_region_id,
+ cycle);
+
+ cxlmd->scrub_cycle = cycle;
+ cxlmd->scrub_region_id = CXL_SCRUB_NO_REGION;
+ }
+
+ return 0;
+}
+
+static int cxl_scrub_set_attrbs(struct device *dev,
+ struct cxl_patrol_scrub_context *cxl_ps_ctx,
+ u8 cycle, u8 flags)
+{
+ if (cxl_ps_ctx->cxlr)
+ return cxl_scrub_set_attrbs_region(dev, cxl_ps_ctx, cycle, flags);
+
+ return cxl_scrub_set_attrbs_device(dev, cxl_ps_ctx, cycle, flags);
+}
+
+static int cxl_patrol_scrub_get_enabled_bg(struct device *dev, void *drv_data,
+ bool *enabled)
+{
+ struct cxl_patrol_scrub_context *ctx = drv_data;
+ u8 cap, flags;
+ u16 cycle;
+ int ret;
+
+ ret = cxl_scrub_get_attrbs(ctx, &cap, &cycle, &flags, NULL);
+ if (ret)
+ return ret;
+
+ *enabled = CXL_GET_SCRUB_EN_STS(flags);
+
+ return 0;
+}
+
+static int cxl_patrol_scrub_set_enabled_bg(struct device *dev, void *drv_data,
+ bool enable)
+{
+ struct cxl_patrol_scrub_context *ctx = drv_data;
+ u8 cap, flags, wr_cycle;
+ u16 rd_cycle;
+ int ret;
+
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ ret = cxl_scrub_get_attrbs(ctx, &cap, &rd_cycle, &flags, NULL);
+ if (ret)
+ return ret;
+
+ wr_cycle = CXL_GET_SCRUB_CYCLE(rd_cycle);
+ flags = CXL_SET_SCRUB_EN(enable);
+
+ return cxl_scrub_set_attrbs(dev, ctx, wr_cycle, flags);
+}
+
+static int cxl_patrol_scrub_get_min_scrub_cycle(struct device *dev,
+ void *drv_data, u32 *min)
+{
+ struct cxl_patrol_scrub_context *ctx = drv_data;
+ u8 cap, flags, min_cycle;
+ u16 cycle;
+ int ret;
+
+ ret = cxl_scrub_get_attrbs(ctx, &cap, &cycle, &flags, &min_cycle);
+ if (ret)
+ return ret;
+
+ *min = min_cycle * 3600;
+
+ return 0;
+}
+
+static int cxl_patrol_scrub_get_max_scrub_cycle(struct device *dev,
+ void *drv_data, u32 *max)
+{
+ *max = U8_MAX * 3600; /* Max set by register size */
+
+ return 0;
+}
+
+static int cxl_patrol_scrub_get_scrub_cycle(struct device *dev, void *drv_data,
+ u32 *scrub_cycle_secs)
+{
+ struct cxl_patrol_scrub_context *ctx = drv_data;
+ u8 cap, flags;
+ u16 cycle;
+ int ret;
+
+ ret = cxl_scrub_get_attrbs(ctx, &cap, &cycle, &flags, NULL);
+ if (ret)
+ return ret;
+
+ *scrub_cycle_secs = CXL_GET_SCRUB_CYCLE(cycle) * 3600;
+
+ return 0;
+}
+
+static int cxl_patrol_scrub_set_scrub_cycle(struct device *dev, void *drv_data,
+ u32 scrub_cycle_secs)
+{
+ struct cxl_patrol_scrub_context *ctx = drv_data;
+ u8 scrub_cycle_hours = scrub_cycle_secs / 3600;
+ u8 cap, wr_cycle, flags, min_cycle;
+ u16 rd_cycle;
+ int ret;
+
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ ret = cxl_scrub_get_attrbs(ctx, &cap, &rd_cycle, &flags, &min_cycle);
+ if (ret)
+ return ret;
+
+ if (!CXL_GET_SCRUB_CYCLE_CHANGEABLE(cap))
+ return -EOPNOTSUPP;
+
+ if (scrub_cycle_hours < min_cycle) {
+ dev_dbg(dev, "Invalid CXL patrol scrub cycle(%d) to set\n",
+ scrub_cycle_hours);
+ dev_dbg(dev,
+ "Minimum supported CXL patrol scrub cycle in hour %d\n",
+ min_cycle);
+ return -EINVAL;
+ }
+ wr_cycle = CXL_SET_SCRUB_CYCLE(scrub_cycle_hours);
+
+ return cxl_scrub_set_attrbs(dev, ctx, wr_cycle, flags);
+}
+
+static const struct edac_scrub_ops cxl_ps_scrub_ops = {
+ .get_enabled_bg = cxl_patrol_scrub_get_enabled_bg,
+ .set_enabled_bg = cxl_patrol_scrub_set_enabled_bg,
+ .get_min_cycle = cxl_patrol_scrub_get_min_scrub_cycle,
+ .get_max_cycle = cxl_patrol_scrub_get_max_scrub_cycle,
+ .get_cycle_duration = cxl_patrol_scrub_get_scrub_cycle,
+ .set_cycle_duration = cxl_patrol_scrub_set_scrub_cycle,
+};
+
+static int cxl_memdev_scrub_init(struct cxl_memdev *cxlmd,
+ struct edac_dev_feature *ras_feature,
+ u8 scrub_inst)
+{
+ struct cxl_patrol_scrub_context *cxl_ps_ctx;
+ struct cxl_feat_entry *feat_entry;
+ u8 cap, flags;
+ u16 cycle;
+ int rc;
+
+ feat_entry = cxl_feature_info(to_cxlfs(cxlmd->cxlds),
+ &CXL_FEAT_PATROL_SCRUB_UUID);
+ if (IS_ERR(feat_entry))
+ return -EOPNOTSUPP;
+
+ if (!(le32_to_cpu(feat_entry->flags) & CXL_FEATURE_F_CHANGEABLE))
+ return -EOPNOTSUPP;
+
+ cxl_ps_ctx = devm_kzalloc(&cxlmd->dev, sizeof(*cxl_ps_ctx), GFP_KERNEL);
+ if (!cxl_ps_ctx)
+ return -ENOMEM;
+
+ *cxl_ps_ctx = (struct cxl_patrol_scrub_context){
+ .get_feat_size = le16_to_cpu(feat_entry->get_feat_size),
+ .set_feat_size = le16_to_cpu(feat_entry->set_feat_size),
+ .get_version = feat_entry->get_feat_ver,
+ .set_version = feat_entry->set_feat_ver,
+ .effects = le16_to_cpu(feat_entry->effects),
+ .instance = scrub_inst,
+ .cxlmd = cxlmd,
+ };
+
+ rc = cxl_mem_scrub_get_attrbs(&cxlmd->cxlds->cxl_mbox, &cap, &cycle,
+ &flags, NULL);
+ if (rc)
+ return rc;
+
+ cxlmd->scrub_cycle = CXL_GET_SCRUB_CYCLE(cycle);
+ cxlmd->scrub_region_id = CXL_SCRUB_NO_REGION;
+
+ ras_feature->ft_type = RAS_FEAT_SCRUB;
+ ras_feature->instance = cxl_ps_ctx->instance;
+ ras_feature->scrub_ops = &cxl_ps_scrub_ops;
+ ras_feature->ctx = cxl_ps_ctx;
+
+ return 0;
+}
+
+static int cxl_region_scrub_init(struct cxl_region *cxlr,
+ struct edac_dev_feature *ras_feature,
+ u8 scrub_inst)
+{
+ struct cxl_patrol_scrub_context *cxl_ps_ctx;
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_feat_entry *feat_entry = NULL;
+ struct cxl_memdev *cxlmd;
+ u8 cap, flags;
+ u16 cycle;
+ int i, rc;
+
+ /*
+ * The cxl_region_rwsem must be held if the code below is used in a context
+ * other than when the region is in the probe state, as shown here.
+ */
+ for (i = 0; i < p->nr_targets; i++) {
+ struct cxl_endpoint_decoder *cxled = p->targets[i];
+
+ cxlmd = cxled_to_memdev(cxled);
+ feat_entry = cxl_feature_info(to_cxlfs(cxlmd->cxlds),
+ &CXL_FEAT_PATROL_SCRUB_UUID);
+ if (IS_ERR(feat_entry))
+ return -EOPNOTSUPP;
+
+ if (!(le32_to_cpu(feat_entry->flags) &
+ CXL_FEATURE_F_CHANGEABLE))
+ return -EOPNOTSUPP;
+
+ rc = cxl_mem_scrub_get_attrbs(&cxlmd->cxlds->cxl_mbox, &cap,
+ &cycle, &flags, NULL);
+ if (rc)
+ return rc;
+
+ cxlmd->scrub_cycle = CXL_GET_SCRUB_CYCLE(cycle);
+ cxlmd->scrub_region_id = CXL_SCRUB_NO_REGION;
+ }
+
+ cxl_ps_ctx = devm_kzalloc(&cxlr->dev, sizeof(*cxl_ps_ctx), GFP_KERNEL);
+ if (!cxl_ps_ctx)
+ return -ENOMEM;
+
+ *cxl_ps_ctx = (struct cxl_patrol_scrub_context){
+ .get_feat_size = le16_to_cpu(feat_entry->get_feat_size),
+ .set_feat_size = le16_to_cpu(feat_entry->set_feat_size),
+ .get_version = feat_entry->get_feat_ver,
+ .set_version = feat_entry->set_feat_ver,
+ .effects = le16_to_cpu(feat_entry->effects),
+ .instance = scrub_inst,
+ .cxlr = cxlr,
+ };
+
+ ras_feature->ft_type = RAS_FEAT_SCRUB;
+ ras_feature->instance = cxl_ps_ctx->instance;
+ ras_feature->scrub_ops = &cxl_ps_scrub_ops;
+ ras_feature->ctx = cxl_ps_ctx;
+
+ return 0;
+}
+
+struct cxl_ecs_context {
+ u16 num_media_frus;
+ u16 get_feat_size;
+ u16 set_feat_size;
+ u8 get_version;
+ u8 set_version;
+ u16 effects;
+ struct cxl_memdev *cxlmd;
+};
+
+/*
+ * See CXL spec rev 3.2 @8.2.10.9.11.2 Table 8-225 DDR5 ECS Control Feature
+ * Readable Attributes.
+ */
+struct cxl_ecs_fru_rd_attrbs {
+ u8 ecs_cap;
+ __le16 ecs_config;
+ u8 ecs_flags;
+} __packed;
+
+struct cxl_ecs_rd_attrbs {
+ u8 ecs_log_cap;
+ struct cxl_ecs_fru_rd_attrbs fru_attrbs[];
+} __packed;
+
+/*
+ * See CXL spec rev 3.2 @8.2.10.9.11.2 Table 8-226 DDR5 ECS Control Feature
+ * Writable Attributes.
+ */
+struct cxl_ecs_fru_wr_attrbs {
+ __le16 ecs_config;
+} __packed;
+
+struct cxl_ecs_wr_attrbs {
+ u8 ecs_log_cap;
+ struct cxl_ecs_fru_wr_attrbs fru_attrbs[];
+} __packed;
+
+#define CXL_ECS_LOG_ENTRY_TYPE_MASK GENMASK(1, 0)
+#define CXL_ECS_REALTIME_REPORT_CAP_MASK BIT(0)
+#define CXL_ECS_THRESHOLD_COUNT_MASK GENMASK(2, 0)
+#define CXL_ECS_COUNT_MODE_MASK BIT(3)
+#define CXL_ECS_RESET_COUNTER_MASK BIT(4)
+#define CXL_ECS_RESET_COUNTER 1
+
+enum {
+ ECS_THRESHOLD_256 = 256,
+ ECS_THRESHOLD_1024 = 1024,
+ ECS_THRESHOLD_4096 = 4096,
+};
+
+enum {
+ ECS_THRESHOLD_IDX_256 = 3,
+ ECS_THRESHOLD_IDX_1024 = 4,
+ ECS_THRESHOLD_IDX_4096 = 5,
+};
+
+static const u16 ecs_supp_threshold[] = {
+ [ECS_THRESHOLD_IDX_256] = 256,
+ [ECS_THRESHOLD_IDX_1024] = 1024,
+ [ECS_THRESHOLD_IDX_4096] = 4096,
+};
+
+enum {
+ ECS_LOG_ENTRY_TYPE_DRAM = 0x0,
+ ECS_LOG_ENTRY_TYPE_MEM_MEDIA_FRU = 0x1,
+};
+
+enum cxl_ecs_count_mode {
+ ECS_MODE_COUNTS_ROWS = 0,
+ ECS_MODE_COUNTS_CODEWORDS = 1,
+};
+
+static int cxl_mem_ecs_get_attrbs(struct device *dev,
+ struct cxl_ecs_context *cxl_ecs_ctx,
+ int fru_id, u8 *log_cap, u16 *config)
+{
+ struct cxl_memdev *cxlmd = cxl_ecs_ctx->cxlmd;
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
+ struct cxl_ecs_fru_rd_attrbs *fru_rd_attrbs;
+ size_t rd_data_size;
+ size_t data_size;
+
+ rd_data_size = cxl_ecs_ctx->get_feat_size;
+
+ struct cxl_ecs_rd_attrbs *rd_attrbs __free(kvfree) =
+ kvzalloc(rd_data_size, GFP_KERNEL);
+ if (!rd_attrbs)
+ return -ENOMEM;
+
+ data_size = cxl_get_feature(cxl_mbox, &CXL_FEAT_ECS_UUID,
+ CXL_GET_FEAT_SEL_CURRENT_VALUE, rd_attrbs,
+ rd_data_size, 0, NULL);
+ if (!data_size)
+ return -EIO;
+
+ fru_rd_attrbs = rd_attrbs->fru_attrbs;
+ *log_cap = rd_attrbs->ecs_log_cap;
+ *config = le16_to_cpu(fru_rd_attrbs[fru_id].ecs_config);
+
+ return 0;
+}
+
+static int cxl_mem_ecs_set_attrbs(struct device *dev,
+ struct cxl_ecs_context *cxl_ecs_ctx,
+ int fru_id, u8 log_cap, u16 config)
+{
+ struct cxl_memdev *cxlmd = cxl_ecs_ctx->cxlmd;
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
+ struct cxl_ecs_fru_rd_attrbs *fru_rd_attrbs;
+ struct cxl_ecs_fru_wr_attrbs *fru_wr_attrbs;
+ size_t rd_data_size, wr_data_size;
+ u16 num_media_frus, count;
+ size_t data_size;
+
+ num_media_frus = cxl_ecs_ctx->num_media_frus;
+ rd_data_size = cxl_ecs_ctx->get_feat_size;
+ wr_data_size = cxl_ecs_ctx->set_feat_size;
+ struct cxl_ecs_rd_attrbs *rd_attrbs __free(kvfree) =
+ kvzalloc(rd_data_size, GFP_KERNEL);
+ if (!rd_attrbs)
+ return -ENOMEM;
+
+ data_size = cxl_get_feature(cxl_mbox, &CXL_FEAT_ECS_UUID,
+ CXL_GET_FEAT_SEL_CURRENT_VALUE, rd_attrbs,
+ rd_data_size, 0, NULL);
+ if (!data_size)
+ return -EIO;
+
+ struct cxl_ecs_wr_attrbs *wr_attrbs __free(kvfree) =
+ kvzalloc(wr_data_size, GFP_KERNEL);
+ if (!wr_attrbs)
+ return -ENOMEM;
+
+ /*
+ * Fill writable attributes from the current attributes read
+ * for all the media FRUs.
+ */
+ fru_rd_attrbs = rd_attrbs->fru_attrbs;
+ fru_wr_attrbs = wr_attrbs->fru_attrbs;
+ wr_attrbs->ecs_log_cap = log_cap;
+ for (count = 0; count < num_media_frus; count++)
+ fru_wr_attrbs[count].ecs_config =
+ fru_rd_attrbs[count].ecs_config;
+
+ fru_wr_attrbs[fru_id].ecs_config = cpu_to_le16(config);
+
+ return cxl_set_feature(cxl_mbox, &CXL_FEAT_ECS_UUID,
+ cxl_ecs_ctx->set_version, wr_attrbs,
+ wr_data_size,
+ CXL_SET_FEAT_FLAG_DATA_SAVED_ACROSS_RESET,
+ 0, NULL);
+}
+
+static u8 cxl_get_ecs_log_entry_type(u8 log_cap, u16 config)
+{
+ return FIELD_GET(CXL_ECS_LOG_ENTRY_TYPE_MASK, log_cap);
+}
+
+static u16 cxl_get_ecs_threshold(u8 log_cap, u16 config)
+{
+	u8 index = FIELD_GET(CXL_ECS_THRESHOLD_COUNT_MASK, config);
+
+	/* Unsupported threshold encodings read back as 0 */
+	if (index >= ARRAY_SIZE(ecs_supp_threshold))
+		return 0;
+
+	return ecs_supp_threshold[index];
+}
+
+static u8 cxl_get_ecs_count_mode(u8 log_cap, u16 config)
+{
+ return FIELD_GET(CXL_ECS_COUNT_MODE_MASK, config);
+}
+
+#define CXL_ECS_GET_ATTR(attrb) \
+ static int cxl_ecs_get_##attrb(struct device *dev, void *drv_data, \
+ int fru_id, u32 *val) \
+ { \
+ struct cxl_ecs_context *ctx = drv_data; \
+ u8 log_cap; \
+ u16 config; \
+ int ret; \
+ \
+ ret = cxl_mem_ecs_get_attrbs(dev, ctx, fru_id, &log_cap, \
+ &config); \
+ if (ret) \
+ return ret; \
+ \
+ *val = cxl_get_ecs_##attrb(log_cap, config); \
+ \
+ return 0; \
+ }
+
+CXL_ECS_GET_ATTR(log_entry_type)
+CXL_ECS_GET_ATTR(count_mode)
+CXL_ECS_GET_ATTR(threshold)
+
+static int cxl_set_ecs_log_entry_type(struct device *dev, u8 *log_cap,
+ u16 *config, u32 val)
+{
+ if (val != ECS_LOG_ENTRY_TYPE_DRAM &&
+ val != ECS_LOG_ENTRY_TYPE_MEM_MEDIA_FRU)
+ return -EINVAL;
+
+ *log_cap = FIELD_PREP(CXL_ECS_LOG_ENTRY_TYPE_MASK, val);
+
+ return 0;
+}
+
+static int cxl_set_ecs_threshold(struct device *dev, u8 *log_cap, u16 *config,
+ u32 val)
+{
+ *config &= ~CXL_ECS_THRESHOLD_COUNT_MASK;
+
+ switch (val) {
+ case ECS_THRESHOLD_256:
+ *config |= FIELD_PREP(CXL_ECS_THRESHOLD_COUNT_MASK,
+ ECS_THRESHOLD_IDX_256);
+ break;
+ case ECS_THRESHOLD_1024:
+ *config |= FIELD_PREP(CXL_ECS_THRESHOLD_COUNT_MASK,
+ ECS_THRESHOLD_IDX_1024);
+ break;
+ case ECS_THRESHOLD_4096:
+ *config |= FIELD_PREP(CXL_ECS_THRESHOLD_COUNT_MASK,
+ ECS_THRESHOLD_IDX_4096);
+ break;
+ default:
+ dev_dbg(dev, "Invalid CXL ECS threshold count(%d) to set\n",
+ val);
+ dev_dbg(dev, "Supported ECS threshold counts: %u, %u, %u\n",
+ ECS_THRESHOLD_256, ECS_THRESHOLD_1024,
+ ECS_THRESHOLD_4096);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cxl_set_ecs_count_mode(struct device *dev, u8 *log_cap, u16 *config,
+ u32 val)
+{
+ if (val != ECS_MODE_COUNTS_ROWS && val != ECS_MODE_COUNTS_CODEWORDS) {
+ dev_dbg(dev, "Invalid CXL ECS scrub mode(%d) to set\n", val);
+ dev_dbg(dev,
+ "Supported ECS Modes: 0: ECS counts rows with errors,"
+ " 1: ECS counts codewords with errors\n");
+ return -EINVAL;
+ }
+
+ *config &= ~CXL_ECS_COUNT_MODE_MASK;
+ *config |= FIELD_PREP(CXL_ECS_COUNT_MODE_MASK, val);
+
+ return 0;
+}
+
+static int cxl_set_ecs_reset_counter(struct device *dev, u8 *log_cap,
+ u16 *config, u32 val)
+{
+ if (val != CXL_ECS_RESET_COUNTER)
+ return -EINVAL;
+
+ *config &= ~CXL_ECS_RESET_COUNTER_MASK;
+ *config |= FIELD_PREP(CXL_ECS_RESET_COUNTER_MASK, val);
+
+ return 0;
+}
+
+#define CXL_ECS_SET_ATTR(attrb) \
+ static int cxl_ecs_set_##attrb(struct device *dev, void *drv_data, \
+ int fru_id, u32 val) \
+ { \
+ struct cxl_ecs_context *ctx = drv_data; \
+ u8 log_cap; \
+ u16 config; \
+ int ret; \
+ \
+ if (!capable(CAP_SYS_RAWIO)) \
+ return -EPERM; \
+ \
+ ret = cxl_mem_ecs_get_attrbs(dev, ctx, fru_id, &log_cap, \
+ &config); \
+ if (ret) \
+ return ret; \
+ \
+ ret = cxl_set_ecs_##attrb(dev, &log_cap, &config, val); \
+ if (ret) \
+ return ret; \
+ \
+ return cxl_mem_ecs_set_attrbs(dev, ctx, fru_id, log_cap, \
+ config); \
+ }
+CXL_ECS_SET_ATTR(log_entry_type)
+CXL_ECS_SET_ATTR(count_mode)
+CXL_ECS_SET_ATTR(reset_counter)
+CXL_ECS_SET_ATTR(threshold)
+
+static const struct edac_ecs_ops cxl_ecs_ops = {
+ .get_log_entry_type = cxl_ecs_get_log_entry_type,
+ .set_log_entry_type = cxl_ecs_set_log_entry_type,
+ .get_mode = cxl_ecs_get_count_mode,
+ .set_mode = cxl_ecs_set_count_mode,
+ .reset = cxl_ecs_set_reset_counter,
+ .get_threshold = cxl_ecs_get_threshold,
+ .set_threshold = cxl_ecs_set_threshold,
+};
+
+static int cxl_memdev_ecs_init(struct cxl_memdev *cxlmd,
+ struct edac_dev_feature *ras_feature)
+{
+ struct cxl_ecs_context *cxl_ecs_ctx;
+ struct cxl_feat_entry *feat_entry;
+ int num_media_frus;
+
+ feat_entry =
+ cxl_feature_info(to_cxlfs(cxlmd->cxlds), &CXL_FEAT_ECS_UUID);
+ if (IS_ERR(feat_entry))
+ return -EOPNOTSUPP;
+
+ if (!(le32_to_cpu(feat_entry->flags) & CXL_FEATURE_F_CHANGEABLE))
+ return -EOPNOTSUPP;
+
+ num_media_frus = (le16_to_cpu(feat_entry->get_feat_size) -
+ sizeof(struct cxl_ecs_rd_attrbs)) /
+ sizeof(struct cxl_ecs_fru_rd_attrbs);
+ if (!num_media_frus)
+ return -EOPNOTSUPP;
+
+ cxl_ecs_ctx =
+ devm_kzalloc(&cxlmd->dev, sizeof(*cxl_ecs_ctx), GFP_KERNEL);
+ if (!cxl_ecs_ctx)
+ return -ENOMEM;
+
+ *cxl_ecs_ctx = (struct cxl_ecs_context){
+ .get_feat_size = le16_to_cpu(feat_entry->get_feat_size),
+ .set_feat_size = le16_to_cpu(feat_entry->set_feat_size),
+ .get_version = feat_entry->get_feat_ver,
+ .set_version = feat_entry->set_feat_ver,
+ .effects = le16_to_cpu(feat_entry->effects),
+ .num_media_frus = num_media_frus,
+ .cxlmd = cxlmd,
+ };
+
+ ras_feature->ft_type = RAS_FEAT_ECS;
+ ras_feature->ecs_ops = &cxl_ecs_ops;
+ ras_feature->ctx = cxl_ecs_ctx;
+ ras_feature->ecs_info.num_media_frus = num_media_frus;
+
+ return 0;
+}
+
+/*
+ * Perform Maintenance, CXL rev 3.2 section 8.2.10.7.1
+ */
+
+/*
+ * Perform Maintenance input payload
+ * CXL rev 3.2 section 8.2.10.7.1 Table 8-117
+ */
+struct cxl_mbox_maintenance_hdr {
+ u8 op_class;
+ u8 op_subclass;
+} __packed;
+
+static int cxl_perform_maintenance(struct cxl_mailbox *cxl_mbox, u8 class,
+ u8 subclass, void *data_in,
+ size_t data_in_size)
+{
+ struct cxl_memdev_maintenance_pi {
+ struct cxl_mbox_maintenance_hdr hdr;
+ u8 data[];
+ } __packed;
+ struct cxl_mbox_cmd mbox_cmd;
+ size_t hdr_size;
+
+ struct cxl_memdev_maintenance_pi *pi __free(kvfree) =
+ kvzalloc(cxl_mbox->payload_size, GFP_KERNEL);
+ if (!pi)
+ return -ENOMEM;
+
+ pi->hdr.op_class = class;
+ pi->hdr.op_subclass = subclass;
+ hdr_size = sizeof(pi->hdr);
+	/*
+	 * Check that the mailbox payload has room for the header plus
+	 * the maintenance data transfer.
+	 */
+ if (hdr_size + data_in_size > cxl_mbox->payload_size)
+ return -ENOMEM;
+
+ memcpy(pi->data, data_in, data_in_size);
+ mbox_cmd = (struct cxl_mbox_cmd){
+ .opcode = CXL_MBOX_OP_DO_MAINTENANCE,
+ .size_in = hdr_size + data_in_size,
+ .payload_in = pi,
+ };
+
+ return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
+}
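+
+/*
+ * An illustrative call from the sparing and PPR paths below, where the
+ * operation class/subclass come from the feature's readable attributes
+ * and pi is the spec-defined input payload:
+ *
+ *	cxl_perform_maintenance(&cxlmd->cxlds->cxl_mbox, ctx->op_class,
+ *				ctx->op_subclass, &pi, sizeof(pi));
+ */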
+
+/*
+ * Support for checking whether a memory operation's attributes come
+ * from a record of the current boot. Records are cached per memdev,
+ * keyed by DPA.
+ */
+
+struct cxl_mem_err_rec {
+ struct xarray rec_gen_media;
+ struct xarray rec_dram;
+};
+
+enum cxl_mem_repair_type {
+ CXL_PPR,
+ CXL_CACHELINE_SPARING,
+ CXL_ROW_SPARING,
+ CXL_BANK_SPARING,
+ CXL_RANK_SPARING,
+ CXL_REPAIR_MAX,
+};
+
+/**
+ * struct cxl_mem_repair_attrbs - CXL memory repair attributes
+ * @dpa: DPA of memory to repair
+ * @nibble_mask: nibble mask, identifies one or more nibbles on the memory bus
+ * @row: row of memory to repair
+ * @column: column of memory to repair
+ * @channel: channel of memory to repair
+ * @sub_channel: sub channel of memory to repair
+ * @rank: rank of memory to repair
+ * @bank_group: bank group of memory to repair
+ * @bank: bank of memory to repair
+ * @repair_type: repair type, e.g. PPR or memory sparing
+ */
+struct cxl_mem_repair_attrbs {
+ u64 dpa;
+ u32 nibble_mask;
+ u32 row;
+ u16 column;
+ u8 channel;
+ u8 sub_channel;
+ u8 rank;
+ u8 bank_group;
+ u8 bank;
+ enum cxl_mem_repair_type repair_type;
+};
+
+static struct cxl_event_gen_media *
+cxl_find_rec_gen_media(struct cxl_memdev *cxlmd,
+ struct cxl_mem_repair_attrbs *attrbs)
+{
+ struct cxl_mem_err_rec *array_rec = cxlmd->err_rec_array;
+ struct cxl_event_gen_media *rec;
+
+ if (!array_rec)
+ return NULL;
+
+ rec = xa_load(&array_rec->rec_gen_media, attrbs->dpa);
+ if (!rec)
+ return NULL;
+
+ if (attrbs->repair_type == CXL_PPR)
+ return rec;
+
+ return NULL;
+}
+
+static struct cxl_event_dram *
+cxl_find_rec_dram(struct cxl_memdev *cxlmd,
+ struct cxl_mem_repair_attrbs *attrbs)
+{
+ struct cxl_mem_err_rec *array_rec = cxlmd->err_rec_array;
+ struct cxl_event_dram *rec;
+ u16 validity_flags;
+
+ if (!array_rec)
+ return NULL;
+
+ rec = xa_load(&array_rec->rec_dram, attrbs->dpa);
+ if (!rec)
+ return NULL;
+
+ validity_flags = get_unaligned_le16(rec->media_hdr.validity_flags);
+ if (!(validity_flags & CXL_DER_VALID_CHANNEL) ||
+ !(validity_flags & CXL_DER_VALID_RANK))
+ return NULL;
+
+ switch (attrbs->repair_type) {
+ case CXL_PPR:
+ if (!(validity_flags & CXL_DER_VALID_NIBBLE) ||
+ get_unaligned_le24(rec->nibble_mask) == attrbs->nibble_mask)
+ return rec;
+ break;
+ case CXL_CACHELINE_SPARING:
+ if (!(validity_flags & CXL_DER_VALID_BANK_GROUP) ||
+ !(validity_flags & CXL_DER_VALID_BANK) ||
+ !(validity_flags & CXL_DER_VALID_ROW) ||
+ !(validity_flags & CXL_DER_VALID_COLUMN))
+ return NULL;
+
+ if (rec->media_hdr.channel == attrbs->channel &&
+ rec->media_hdr.rank == attrbs->rank &&
+ rec->bank_group == attrbs->bank_group &&
+ rec->bank == attrbs->bank &&
+ get_unaligned_le24(rec->row) == attrbs->row &&
+ get_unaligned_le16(rec->column) == attrbs->column &&
+ (!(validity_flags & CXL_DER_VALID_NIBBLE) ||
+ get_unaligned_le24(rec->nibble_mask) ==
+ attrbs->nibble_mask) &&
+ (!(validity_flags & CXL_DER_VALID_SUB_CHANNEL) ||
+ rec->sub_channel == attrbs->sub_channel))
+ return rec;
+ break;
+ case CXL_ROW_SPARING:
+ if (!(validity_flags & CXL_DER_VALID_BANK_GROUP) ||
+ !(validity_flags & CXL_DER_VALID_BANK) ||
+ !(validity_flags & CXL_DER_VALID_ROW))
+ return NULL;
+
+ if (rec->media_hdr.channel == attrbs->channel &&
+ rec->media_hdr.rank == attrbs->rank &&
+ rec->bank_group == attrbs->bank_group &&
+ rec->bank == attrbs->bank &&
+ get_unaligned_le24(rec->row) == attrbs->row &&
+ (!(validity_flags & CXL_DER_VALID_NIBBLE) ||
+ get_unaligned_le24(rec->nibble_mask) ==
+ attrbs->nibble_mask))
+ return rec;
+ break;
+ case CXL_BANK_SPARING:
+ if (!(validity_flags & CXL_DER_VALID_BANK_GROUP) ||
+ !(validity_flags & CXL_DER_VALID_BANK))
+ return NULL;
+
+ if (rec->media_hdr.channel == attrbs->channel &&
+ rec->media_hdr.rank == attrbs->rank &&
+ rec->bank_group == attrbs->bank_group &&
+ rec->bank == attrbs->bank &&
+ (!(validity_flags & CXL_DER_VALID_NIBBLE) ||
+ get_unaligned_le24(rec->nibble_mask) ==
+ attrbs->nibble_mask))
+ return rec;
+ break;
+ case CXL_RANK_SPARING:
+ if (rec->media_hdr.channel == attrbs->channel &&
+ rec->media_hdr.rank == attrbs->rank &&
+ (!(validity_flags & CXL_DER_VALID_NIBBLE) ||
+ get_unaligned_le24(rec->nibble_mask) ==
+ attrbs->nibble_mask))
+ return rec;
+ break;
+ default:
+ return NULL;
+ }
+
+ return NULL;
+}
+
+#define CXL_MAX_STORAGE_DAYS 10
+#define CXL_MAX_STORAGE_TIME_SECS (CXL_MAX_STORAGE_DAYS * 24 * 60 * 60)
+
+static void cxl_del_expired_gmedia_recs(struct xarray *rec_xarray,
+ struct cxl_event_gen_media *cur_rec)
+{
+ u64 cur_ts = le64_to_cpu(cur_rec->media_hdr.hdr.timestamp);
+ struct cxl_event_gen_media *rec;
+ unsigned long index;
+ u64 delta_ts_secs;
+
+ xa_for_each(rec_xarray, index, rec) {
+ delta_ts_secs = (cur_ts -
+ le64_to_cpu(rec->media_hdr.hdr.timestamp)) / 1000000000ULL;
+ if (delta_ts_secs >= CXL_MAX_STORAGE_TIME_SECS) {
+ xa_erase(rec_xarray, index);
+ kfree(rec);
+ }
+ }
+}
+
+static void cxl_del_expired_dram_recs(struct xarray *rec_xarray,
+ struct cxl_event_dram *cur_rec)
+{
+ u64 cur_ts = le64_to_cpu(cur_rec->media_hdr.hdr.timestamp);
+ struct cxl_event_dram *rec;
+ unsigned long index;
+ u64 delta_secs;
+
+ xa_for_each(rec_xarray, index, rec) {
+ delta_secs = (cur_ts -
+ le64_to_cpu(rec->media_hdr.hdr.timestamp)) / 1000000000ULL;
+ if (delta_secs >= CXL_MAX_STORAGE_TIME_SECS) {
+ xa_erase(rec_xarray, index);
+ kfree(rec);
+ }
+ }
+}
+
+#define CXL_MAX_REC_STORAGE_COUNT 200
+
+static void cxl_del_overflow_old_recs(struct xarray *rec_xarray)
+{
+ void *err_rec;
+ unsigned long index, count = 0;
+
+ xa_for_each(rec_xarray, index, err_rec)
+ count++;
+
+ if (count <= CXL_MAX_REC_STORAGE_COUNT)
+ return;
+
+ count -= CXL_MAX_REC_STORAGE_COUNT;
+ xa_for_each(rec_xarray, index, err_rec) {
+ xa_erase(rec_xarray, index);
+ kfree(err_rec);
+ count--;
+ if (!count)
+ break;
+ }
+}
+
+int cxl_store_rec_gen_media(struct cxl_memdev *cxlmd, union cxl_event *evt)
+{
+ struct cxl_mem_err_rec *array_rec = cxlmd->err_rec_array;
+ struct cxl_event_gen_media *rec;
+ void *old_rec;
+
+ if (!IS_ENABLED(CONFIG_CXL_EDAC_MEM_REPAIR) || !array_rec)
+ return 0;
+
+ rec = kmemdup(&evt->gen_media, sizeof(*rec), GFP_KERNEL);
+ if (!rec)
+ return -ENOMEM;
+
+ old_rec = xa_store(&array_rec->rec_gen_media,
+ le64_to_cpu(rec->media_hdr.phys_addr), rec,
+ GFP_KERNEL);
+	if (xa_is_err(old_rec)) {
+		kfree(rec);
+		return xa_err(old_rec);
+	}
+
+ kfree(old_rec);
+
+ cxl_del_expired_gmedia_recs(&array_rec->rec_gen_media, rec);
+ cxl_del_overflow_old_recs(&array_rec->rec_gen_media);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_store_rec_gen_media, "CXL");
+
+int cxl_store_rec_dram(struct cxl_memdev *cxlmd, union cxl_event *evt)
+{
+ struct cxl_mem_err_rec *array_rec = cxlmd->err_rec_array;
+ struct cxl_event_dram *rec;
+ void *old_rec;
+
+ if (!IS_ENABLED(CONFIG_CXL_EDAC_MEM_REPAIR) || !array_rec)
+ return 0;
+
+ rec = kmemdup(&evt->dram, sizeof(*rec), GFP_KERNEL);
+ if (!rec)
+ return -ENOMEM;
+
+ old_rec = xa_store(&array_rec->rec_dram,
+ le64_to_cpu(rec->media_hdr.phys_addr), rec,
+ GFP_KERNEL);
+	if (xa_is_err(old_rec)) {
+		kfree(rec);
+		return xa_err(old_rec);
+	}
+
+ kfree(old_rec);
+
+ cxl_del_expired_dram_recs(&array_rec->rec_dram, rec);
+ cxl_del_overflow_old_recs(&array_rec->rec_dram);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_store_rec_dram, "CXL");
+
+static bool cxl_is_memdev_memory_online(const struct cxl_memdev *cxlmd)
+{
+ struct cxl_port *port = cxlmd->endpoint;
+
+ if (port && cxl_num_decoders_committed(port))
+ return true;
+
+ return false;
+}
+
+/*
+ * CXL memory sparing control
+ */
+enum cxl_mem_sparing_granularity {
+ CXL_MEM_SPARING_CACHELINE,
+ CXL_MEM_SPARING_ROW,
+ CXL_MEM_SPARING_BANK,
+ CXL_MEM_SPARING_RANK,
+ CXL_MEM_SPARING_MAX
+};
+
+struct cxl_mem_sparing_context {
+ struct cxl_memdev *cxlmd;
+ uuid_t repair_uuid;
+ u16 get_feat_size;
+ u16 set_feat_size;
+ u16 effects;
+ u8 instance;
+ u8 get_version;
+ u8 set_version;
+ u8 op_class;
+ u8 op_subclass;
+ bool cap_safe_when_in_use;
+ bool cap_hard_sparing;
+ bool cap_soft_sparing;
+ u8 channel;
+ u8 rank;
+ u8 bank_group;
+ u32 nibble_mask;
+ u64 dpa;
+ u32 row;
+ u16 column;
+ u8 bank;
+ u8 sub_channel;
+ enum edac_mem_repair_type repair_type;
+ bool persist_mode;
+};
+
+#define CXL_SPARING_RD_CAP_SAFE_IN_USE_MASK BIT(0)
+#define CXL_SPARING_RD_CAP_HARD_SPARING_MASK BIT(1)
+#define CXL_SPARING_RD_CAP_SOFT_SPARING_MASK BIT(2)
+
+#define CXL_SPARING_WR_DEVICE_INITIATED_MASK BIT(0)
+
+#define CXL_SPARING_QUERY_RESOURCE_FLAG BIT(0)
+#define CXL_SET_HARD_SPARING_FLAG BIT(1)
+#define CXL_SPARING_SUB_CHNL_VALID_FLAG BIT(2)
+#define CXL_SPARING_NIB_MASK_VALID_FLAG BIT(3)
+
+#define CXL_GET_SPARING_SAFE_IN_USE(flags) \
+ (FIELD_GET(CXL_SPARING_RD_CAP_SAFE_IN_USE_MASK, \
+ flags) ^ 1)
+#define CXL_GET_CAP_HARD_SPARING(flags) \
+ FIELD_GET(CXL_SPARING_RD_CAP_HARD_SPARING_MASK, \
+ flags)
+#define CXL_GET_CAP_SOFT_SPARING(flags) \
+ FIELD_GET(CXL_SPARING_RD_CAP_SOFT_SPARING_MASK, \
+ flags)
+
+#define CXL_SET_SPARING_QUERY_RESOURCE(val) \
+ FIELD_PREP(CXL_SPARING_QUERY_RESOURCE_FLAG, val)
+#define CXL_SET_HARD_SPARING(val) \
+ FIELD_PREP(CXL_SET_HARD_SPARING_FLAG, val)
+#define CXL_SET_SPARING_SUB_CHNL_VALID(val) \
+ FIELD_PREP(CXL_SPARING_SUB_CHNL_VALID_FLAG, val)
+#define CXL_SET_SPARING_NIB_MASK_VALID(val) \
+ FIELD_PREP(CXL_SPARING_NIB_MASK_VALID_FLAG, val)
+
+/*
+ * See CXL spec rev 3.2 @8.2.10.7.2.3 Table 8-134 Memory Sparing Feature
+ * Readable Attributes.
+ */
+struct cxl_memdev_repair_rd_attrbs_hdr {
+ u8 max_op_latency;
+ __le16 op_cap;
+ __le16 op_mode;
+ u8 op_class;
+ u8 op_subclass;
+ u8 rsvd[9];
+} __packed;
+
+struct cxl_memdev_sparing_rd_attrbs {
+ struct cxl_memdev_repair_rd_attrbs_hdr hdr;
+ u8 rsvd;
+ __le16 restriction_flags;
+} __packed;
+
+/*
+ * See CXL spec rev 3.2 @8.2.10.7.1.4 Table 8-120 Memory Sparing Input Payload.
+ */
+struct cxl_memdev_sparing_in_payload {
+ u8 flags;
+ u8 channel;
+ u8 rank;
+ u8 nibble_mask[3];
+ u8 bank_group;
+ u8 bank;
+ u8 row[3];
+ __le16 column;
+ u8 sub_channel;
+} __packed;
+
+static int
+cxl_mem_sparing_get_attrbs(struct cxl_mem_sparing_context *cxl_sparing_ctx)
+{
+ size_t rd_data_size = sizeof(struct cxl_memdev_sparing_rd_attrbs);
+ struct cxl_memdev *cxlmd = cxl_sparing_ctx->cxlmd;
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
+ u16 restriction_flags;
+ size_t data_size;
+ u16 return_code;
+ struct cxl_memdev_sparing_rd_attrbs *rd_attrbs __free(kfree) =
+ kzalloc(rd_data_size, GFP_KERNEL);
+ if (!rd_attrbs)
+ return -ENOMEM;
+
+ data_size = cxl_get_feature(cxl_mbox, &cxl_sparing_ctx->repair_uuid,
+ CXL_GET_FEAT_SEL_CURRENT_VALUE, rd_attrbs,
+ rd_data_size, 0, &return_code);
+ if (!data_size)
+ return -EIO;
+
+ cxl_sparing_ctx->op_class = rd_attrbs->hdr.op_class;
+ cxl_sparing_ctx->op_subclass = rd_attrbs->hdr.op_subclass;
+ restriction_flags = le16_to_cpu(rd_attrbs->restriction_flags);
+ cxl_sparing_ctx->cap_safe_when_in_use =
+ CXL_GET_SPARING_SAFE_IN_USE(restriction_flags);
+ cxl_sparing_ctx->cap_hard_sparing =
+ CXL_GET_CAP_HARD_SPARING(restriction_flags);
+ cxl_sparing_ctx->cap_soft_sparing =
+ CXL_GET_CAP_SOFT_SPARING(restriction_flags);
+
+ return 0;
+}
+
+static struct cxl_event_dram *
+cxl_mem_get_rec_dram(struct cxl_memdev *cxlmd,
+ struct cxl_mem_sparing_context *ctx)
+{
+ struct cxl_mem_repair_attrbs attrbs = { 0 };
+
+ attrbs.dpa = ctx->dpa;
+ attrbs.channel = ctx->channel;
+ attrbs.rank = ctx->rank;
+ attrbs.nibble_mask = ctx->nibble_mask;
+ switch (ctx->repair_type) {
+ case EDAC_REPAIR_CACHELINE_SPARING:
+ attrbs.repair_type = CXL_CACHELINE_SPARING;
+ attrbs.bank_group = ctx->bank_group;
+ attrbs.bank = ctx->bank;
+ attrbs.row = ctx->row;
+ attrbs.column = ctx->column;
+ attrbs.sub_channel = ctx->sub_channel;
+ break;
+ case EDAC_REPAIR_ROW_SPARING:
+ attrbs.repair_type = CXL_ROW_SPARING;
+ attrbs.bank_group = ctx->bank_group;
+ attrbs.bank = ctx->bank;
+ attrbs.row = ctx->row;
+ break;
+ case EDAC_REPAIR_BANK_SPARING:
+ attrbs.repair_type = CXL_BANK_SPARING;
+ attrbs.bank_group = ctx->bank_group;
+ attrbs.bank = ctx->bank;
+ break;
+ case EDAC_REPAIR_RANK_SPARING:
+		attrbs.repair_type = CXL_RANK_SPARING;
+ break;
+ default:
+ return NULL;
+ }
+
+ return cxl_find_rec_dram(cxlmd, &attrbs);
+}
+
+static int
+cxl_mem_perform_sparing(struct device *dev,
+ struct cxl_mem_sparing_context *cxl_sparing_ctx)
+{
+ struct cxl_memdev *cxlmd = cxl_sparing_ctx->cxlmd;
+ struct cxl_memdev_sparing_in_payload sparing_pi;
+ struct cxl_event_dram *rec = NULL;
+ u16 validity_flags = 0;
+
+ struct rw_semaphore *region_lock __free(rwsem_read_release) =
+ rwsem_read_intr_acquire(&cxl_region_rwsem);
+ if (!region_lock)
+ return -EINTR;
+
+ struct rw_semaphore *dpa_lock __free(rwsem_read_release) =
+ rwsem_read_intr_acquire(&cxl_dpa_rwsem);
+ if (!dpa_lock)
+ return -EINTR;
+
+ if (!cxl_sparing_ctx->cap_safe_when_in_use) {
+ /* Memory to repair must be offline */
+ if (cxl_is_memdev_memory_online(cxlmd))
+ return -EBUSY;
+ } else {
+ if (cxl_is_memdev_memory_online(cxlmd)) {
+ rec = cxl_mem_get_rec_dram(cxlmd, cxl_sparing_ctx);
+ if (!rec)
+ return -EINVAL;
+
+ if (!get_unaligned_le16(rec->media_hdr.validity_flags))
+ return -EINVAL;
+ }
+ }
+
+ memset(&sparing_pi, 0, sizeof(sparing_pi));
+ sparing_pi.flags = CXL_SET_SPARING_QUERY_RESOURCE(0);
+ if (cxl_sparing_ctx->persist_mode)
+ sparing_pi.flags |= CXL_SET_HARD_SPARING(1);
+
+ if (rec)
+ validity_flags = get_unaligned_le16(rec->media_hdr.validity_flags);
+
+ switch (cxl_sparing_ctx->repair_type) {
+ case EDAC_REPAIR_CACHELINE_SPARING:
+ sparing_pi.column = cpu_to_le16(cxl_sparing_ctx->column);
+ if (!rec || (validity_flags & CXL_DER_VALID_SUB_CHANNEL)) {
+ sparing_pi.flags |= CXL_SET_SPARING_SUB_CHNL_VALID(1);
+ sparing_pi.sub_channel = cxl_sparing_ctx->sub_channel;
+ }
+ fallthrough;
+ case EDAC_REPAIR_ROW_SPARING:
+ put_unaligned_le24(cxl_sparing_ctx->row, sparing_pi.row);
+ fallthrough;
+ case EDAC_REPAIR_BANK_SPARING:
+ sparing_pi.bank_group = cxl_sparing_ctx->bank_group;
+ sparing_pi.bank = cxl_sparing_ctx->bank;
+ fallthrough;
+ case EDAC_REPAIR_RANK_SPARING:
+ sparing_pi.rank = cxl_sparing_ctx->rank;
+ fallthrough;
+ default:
+ sparing_pi.channel = cxl_sparing_ctx->channel;
+ if ((rec && (validity_flags & CXL_DER_VALID_NIBBLE)) ||
+ (!rec && (!cxl_sparing_ctx->nibble_mask ||
+ (cxl_sparing_ctx->nibble_mask & 0xFFFFFF)))) {
+ sparing_pi.flags |= CXL_SET_SPARING_NIB_MASK_VALID(1);
+ put_unaligned_le24(cxl_sparing_ctx->nibble_mask,
+ sparing_pi.nibble_mask);
+ }
+ break;
+ }
+
+ return cxl_perform_maintenance(&cxlmd->cxlds->cxl_mbox,
+ cxl_sparing_ctx->op_class,
+ cxl_sparing_ctx->op_subclass,
+ &sparing_pi, sizeof(sparing_pi));
+}
+
+static int cxl_mem_sparing_get_repair_type(struct device *dev, void *drv_data,
+ const char **repair_type)
+{
+ struct cxl_mem_sparing_context *ctx = drv_data;
+
+ switch (ctx->repair_type) {
+ case EDAC_REPAIR_CACHELINE_SPARING:
+ case EDAC_REPAIR_ROW_SPARING:
+ case EDAC_REPAIR_BANK_SPARING:
+ case EDAC_REPAIR_RANK_SPARING:
+ *repair_type = edac_repair_type[ctx->repair_type];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define CXL_SPARING_GET_ATTR(attrb, data_type) \
+ static int cxl_mem_sparing_get_##attrb( \
+ struct device *dev, void *drv_data, data_type *val) \
+ { \
+ struct cxl_mem_sparing_context *ctx = drv_data; \
+ \
+ *val = ctx->attrb; \
+ \
+ return 0; \
+ }
+CXL_SPARING_GET_ATTR(persist_mode, bool)
+CXL_SPARING_GET_ATTR(dpa, u64)
+CXL_SPARING_GET_ATTR(nibble_mask, u32)
+CXL_SPARING_GET_ATTR(bank_group, u32)
+CXL_SPARING_GET_ATTR(bank, u32)
+CXL_SPARING_GET_ATTR(rank, u32)
+CXL_SPARING_GET_ATTR(row, u32)
+CXL_SPARING_GET_ATTR(column, u32)
+CXL_SPARING_GET_ATTR(channel, u32)
+CXL_SPARING_GET_ATTR(sub_channel, u32)
+
+#define CXL_SPARING_SET_ATTR(attrb, data_type) \
+ static int cxl_mem_sparing_set_##attrb(struct device *dev, \
+ void *drv_data, data_type val) \
+ { \
+ struct cxl_mem_sparing_context *ctx = drv_data; \
+ \
+ ctx->attrb = val; \
+ \
+ return 0; \
+ }
+CXL_SPARING_SET_ATTR(nibble_mask, u32)
+CXL_SPARING_SET_ATTR(bank_group, u32)
+CXL_SPARING_SET_ATTR(bank, u32)
+CXL_SPARING_SET_ATTR(rank, u32)
+CXL_SPARING_SET_ATTR(row, u32)
+CXL_SPARING_SET_ATTR(column, u32)
+CXL_SPARING_SET_ATTR(channel, u32)
+CXL_SPARING_SET_ATTR(sub_channel, u32)
+
+static int cxl_mem_sparing_set_persist_mode(struct device *dev, void *drv_data,
+ bool persist_mode)
+{
+ struct cxl_mem_sparing_context *ctx = drv_data;
+
+ if ((persist_mode && ctx->cap_hard_sparing) ||
+ (!persist_mode && ctx->cap_soft_sparing))
+ ctx->persist_mode = persist_mode;
+ else
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int cxl_get_mem_sparing_safe_when_in_use(struct device *dev,
+ void *drv_data, bool *safe)
+{
+ struct cxl_mem_sparing_context *ctx = drv_data;
+
+ *safe = ctx->cap_safe_when_in_use;
+
+ return 0;
+}
+
+static int cxl_mem_sparing_get_min_dpa(struct device *dev, void *drv_data,
+ u64 *min_dpa)
+{
+ struct cxl_mem_sparing_context *ctx = drv_data;
+ struct cxl_memdev *cxlmd = ctx->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+ *min_dpa = cxlds->dpa_res.start;
+
+ return 0;
+}
+
+static int cxl_mem_sparing_get_max_dpa(struct device *dev, void *drv_data,
+ u64 *max_dpa)
+{
+ struct cxl_mem_sparing_context *ctx = drv_data;
+ struct cxl_memdev *cxlmd = ctx->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+ *max_dpa = cxlds->dpa_res.end;
+
+ return 0;
+}
+
+static int cxl_mem_sparing_set_dpa(struct device *dev, void *drv_data, u64 dpa)
+{
+ struct cxl_mem_sparing_context *ctx = drv_data;
+ struct cxl_memdev *cxlmd = ctx->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+ if (dpa < cxlds->dpa_res.start || dpa > cxlds->dpa_res.end)
+ return -EINVAL;
+
+ ctx->dpa = dpa;
+
+ return 0;
+}
+
+static int cxl_do_mem_sparing(struct device *dev, void *drv_data, u32 val)
+{
+ struct cxl_mem_sparing_context *ctx = drv_data;
+
+ if (val != EDAC_DO_MEM_REPAIR)
+ return -EINVAL;
+
+ return cxl_mem_perform_sparing(dev, ctx);
+}
+
+#define RANK_OPS \
+ .get_repair_type = cxl_mem_sparing_get_repair_type, \
+ .get_persist_mode = cxl_mem_sparing_get_persist_mode, \
+ .set_persist_mode = cxl_mem_sparing_set_persist_mode, \
+ .get_repair_safe_when_in_use = cxl_get_mem_sparing_safe_when_in_use, \
+ .get_min_dpa = cxl_mem_sparing_get_min_dpa, \
+ .get_max_dpa = cxl_mem_sparing_get_max_dpa, \
+ .get_dpa = cxl_mem_sparing_get_dpa, \
+ .set_dpa = cxl_mem_sparing_set_dpa, \
+ .get_nibble_mask = cxl_mem_sparing_get_nibble_mask, \
+ .set_nibble_mask = cxl_mem_sparing_set_nibble_mask, \
+ .get_rank = cxl_mem_sparing_get_rank, \
+ .set_rank = cxl_mem_sparing_set_rank, \
+ .get_channel = cxl_mem_sparing_get_channel, \
+ .set_channel = cxl_mem_sparing_set_channel, \
+ .do_repair = cxl_do_mem_sparing
+
+#define BANK_OPS \
+ RANK_OPS, .get_bank_group = cxl_mem_sparing_get_bank_group, \
+ .set_bank_group = cxl_mem_sparing_set_bank_group, \
+ .get_bank = cxl_mem_sparing_get_bank, \
+ .set_bank = cxl_mem_sparing_set_bank
+
+#define ROW_OPS \
+ BANK_OPS, .get_row = cxl_mem_sparing_get_row, \
+ .set_row = cxl_mem_sparing_set_row
+
+#define CACHELINE_OPS \
+ ROW_OPS, .get_column = cxl_mem_sparing_get_column, \
+ .set_column = cxl_mem_sparing_set_column, \
+ .get_sub_channel = cxl_mem_sparing_get_sub_channel, \
+ .set_sub_channel = cxl_mem_sparing_set_sub_channel
+
+static const struct edac_mem_repair_ops cxl_rank_sparing_ops = {
+ RANK_OPS,
+};
+
+static const struct edac_mem_repair_ops cxl_bank_sparing_ops = {
+ BANK_OPS,
+};
+
+static const struct edac_mem_repair_ops cxl_row_sparing_ops = {
+ ROW_OPS,
+};
+
+static const struct edac_mem_repair_ops cxl_cacheline_sparing_ops = {
+ CACHELINE_OPS,
+};
+
+struct cxl_mem_sparing_desc {
+ const uuid_t repair_uuid;
+ enum edac_mem_repair_type repair_type;
+ const struct edac_mem_repair_ops *repair_ops;
+};
+
+static const struct cxl_mem_sparing_desc mem_sparing_desc[] = {
+ {
+ .repair_uuid = CXL_FEAT_CACHELINE_SPARING_UUID,
+ .repair_type = EDAC_REPAIR_CACHELINE_SPARING,
+ .repair_ops = &cxl_cacheline_sparing_ops,
+ },
+ {
+ .repair_uuid = CXL_FEAT_ROW_SPARING_UUID,
+ .repair_type = EDAC_REPAIR_ROW_SPARING,
+ .repair_ops = &cxl_row_sparing_ops,
+ },
+ {
+ .repair_uuid = CXL_FEAT_BANK_SPARING_UUID,
+ .repair_type = EDAC_REPAIR_BANK_SPARING,
+ .repair_ops = &cxl_bank_sparing_ops,
+ },
+ {
+ .repair_uuid = CXL_FEAT_RANK_SPARING_UUID,
+ .repair_type = EDAC_REPAIR_RANK_SPARING,
+ .repair_ops = &cxl_rank_sparing_ops,
+ },
+};
+
+static int cxl_memdev_sparing_init(struct cxl_memdev *cxlmd,
+ struct edac_dev_feature *ras_feature,
+ const struct cxl_mem_sparing_desc *desc,
+ u8 repair_inst)
+{
+ struct cxl_mem_sparing_context *cxl_sparing_ctx;
+ struct cxl_feat_entry *feat_entry;
+ int ret;
+
+ feat_entry = cxl_feature_info(to_cxlfs(cxlmd->cxlds),
+ &desc->repair_uuid);
+ if (IS_ERR(feat_entry))
+ return -EOPNOTSUPP;
+
+ if (!(le32_to_cpu(feat_entry->flags) & CXL_FEATURE_F_CHANGEABLE))
+ return -EOPNOTSUPP;
+
+ cxl_sparing_ctx = devm_kzalloc(&cxlmd->dev, sizeof(*cxl_sparing_ctx),
+ GFP_KERNEL);
+ if (!cxl_sparing_ctx)
+ return -ENOMEM;
+
+ *cxl_sparing_ctx = (struct cxl_mem_sparing_context){
+ .get_feat_size = le16_to_cpu(feat_entry->get_feat_size),
+ .set_feat_size = le16_to_cpu(feat_entry->set_feat_size),
+ .get_version = feat_entry->get_feat_ver,
+ .set_version = feat_entry->set_feat_ver,
+ .effects = le16_to_cpu(feat_entry->effects),
+ .cxlmd = cxlmd,
+ .repair_type = desc->repair_type,
+ .instance = repair_inst++,
+ };
+ uuid_copy(&cxl_sparing_ctx->repair_uuid, &desc->repair_uuid);
+
+ ret = cxl_mem_sparing_get_attrbs(cxl_sparing_ctx);
+ if (ret)
+ return ret;
+
+	if (cxl_sparing_ctx->cap_soft_sparing)
+		cxl_sparing_ctx->persist_mode = 0;
+	else if (cxl_sparing_ctx->cap_hard_sparing)
+		cxl_sparing_ctx->persist_mode = 1;
+ else
+ return -EOPNOTSUPP;
+
+ ras_feature->ft_type = RAS_FEAT_MEM_REPAIR;
+ ras_feature->instance = cxl_sparing_ctx->instance;
+ ras_feature->mem_repair_ops = desc->repair_ops;
+ ras_feature->ctx = cxl_sparing_ctx;
+
+ return 0;
+}
+
+/*
+ * CXL memory soft PPR & hard PPR control
+ */
+struct cxl_ppr_context {
+ uuid_t repair_uuid;
+ u8 instance;
+ u16 get_feat_size;
+ u16 set_feat_size;
+ u8 get_version;
+ u8 set_version;
+ u16 effects;
+ u8 op_class;
+ u8 op_subclass;
+ bool cap_dpa;
+ bool cap_nib_mask;
+ bool media_accessible;
+ bool data_retained;
+ struct cxl_memdev *cxlmd;
+ enum edac_mem_repair_type repair_type;
+ bool persist_mode;
+ u64 dpa;
+ u32 nibble_mask;
+};
+
+/*
+ * See CXL rev 3.2 @8.2.10.7.2.1 Table 8-128 sPPR Feature Readable Attributes
+ *
+ * See CXL rev 3.2 @8.2.10.7.2.2 Table 8-131 hPPR Feature Readable Attributes
+ */
+
+#define CXL_PPR_OP_CAP_DEVICE_INITIATED BIT(0)
+#define CXL_PPR_OP_MODE_DEV_INITIATED BIT(0)
+
+#define CXL_PPR_FLAG_DPA_SUPPORT_MASK BIT(0)
+#define CXL_PPR_FLAG_NIB_SUPPORT_MASK BIT(1)
+#define CXL_PPR_FLAG_MEM_SPARING_EV_REC_SUPPORT_MASK BIT(2)
+#define CXL_PPR_FLAG_DEV_INITED_PPR_AT_BOOT_CAP_MASK BIT(3)
+
+#define CXL_PPR_RESTRICTION_FLAG_MEDIA_ACCESSIBLE_MASK BIT(0)
+#define CXL_PPR_RESTRICTION_FLAG_DATA_RETAINED_MASK BIT(2)
+
+#define CXL_PPR_SPARING_EV_REC_EN_MASK BIT(0)
+#define CXL_PPR_DEV_INITED_PPR_AT_BOOT_EN_MASK BIT(1)
+
+#define CXL_PPR_GET_CAP_DPA(flags) \
+ FIELD_GET(CXL_PPR_FLAG_DPA_SUPPORT_MASK, flags)
+#define CXL_PPR_GET_CAP_NIB_MASK(flags) \
+ FIELD_GET(CXL_PPR_FLAG_NIB_SUPPORT_MASK, flags)
+#define CXL_PPR_GET_MEDIA_ACCESSIBLE(restriction_flags) \
+ (FIELD_GET(CXL_PPR_RESTRICTION_FLAG_MEDIA_ACCESSIBLE_MASK, \
+ restriction_flags) ^ 1)
+#define CXL_PPR_GET_DATA_RETAINED(restriction_flags) \
+ (FIELD_GET(CXL_PPR_RESTRICTION_FLAG_DATA_RETAINED_MASK, \
+ restriction_flags) ^ 1)
+
+struct cxl_memdev_ppr_rd_attrbs {
+ struct cxl_memdev_repair_rd_attrbs_hdr hdr;
+ u8 ppr_flags;
+ __le16 restriction_flags;
+ u8 ppr_op_mode;
+} __packed;
+
+/*
+ * See CXL rev 3.2 @8.2.10.7.1.2 Table 8-118 sPPR Maintenance Input Payload
+ *
+ * See CXL rev 3.2 @8.2.10.7.1.3 Table 8-119 hPPR Maintenance Input Payload
+ */
+struct cxl_memdev_ppr_maintenance_attrbs {
+ u8 flags;
+ __le64 dpa;
+ u8 nibble_mask[3];
+} __packed;
+
+static int cxl_mem_ppr_get_attrbs(struct cxl_ppr_context *cxl_ppr_ctx)
+{
+ size_t rd_data_size = sizeof(struct cxl_memdev_ppr_rd_attrbs);
+ struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
+ u16 restriction_flags;
+ size_t data_size;
+ u16 return_code;
+
+ struct cxl_memdev_ppr_rd_attrbs *rd_attrbs __free(kfree) =
+ kmalloc(rd_data_size, GFP_KERNEL);
+ if (!rd_attrbs)
+ return -ENOMEM;
+
+ data_size = cxl_get_feature(cxl_mbox, &cxl_ppr_ctx->repair_uuid,
+ CXL_GET_FEAT_SEL_CURRENT_VALUE, rd_attrbs,
+ rd_data_size, 0, &return_code);
+ if (!data_size)
+ return -EIO;
+
+ cxl_ppr_ctx->op_class = rd_attrbs->hdr.op_class;
+ cxl_ppr_ctx->op_subclass = rd_attrbs->hdr.op_subclass;
+ cxl_ppr_ctx->cap_dpa = CXL_PPR_GET_CAP_DPA(rd_attrbs->ppr_flags);
+ cxl_ppr_ctx->cap_nib_mask =
+ CXL_PPR_GET_CAP_NIB_MASK(rd_attrbs->ppr_flags);
+
+ restriction_flags = le16_to_cpu(rd_attrbs->restriction_flags);
+ cxl_ppr_ctx->media_accessible =
+ CXL_PPR_GET_MEDIA_ACCESSIBLE(restriction_flags);
+ cxl_ppr_ctx->data_retained =
+ CXL_PPR_GET_DATA_RETAINED(restriction_flags);
+
+ return 0;
+}
+
+static int cxl_mem_perform_ppr(struct cxl_ppr_context *cxl_ppr_ctx)
+{
+ struct cxl_memdev_ppr_maintenance_attrbs maintenance_attrbs;
+ struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
+ struct cxl_mem_repair_attrbs attrbs = { 0 };
+
+ struct rw_semaphore *region_lock __free(rwsem_read_release) =
+ rwsem_read_intr_acquire(&cxl_region_rwsem);
+ if (!region_lock)
+ return -EINTR;
+
+ struct rw_semaphore *dpa_lock __free(rwsem_read_release) =
+ rwsem_read_intr_acquire(&cxl_dpa_rwsem);
+ if (!dpa_lock)
+ return -EINTR;
+
+ if (!cxl_ppr_ctx->media_accessible || !cxl_ppr_ctx->data_retained) {
+ /* Memory to repair must be offline */
+ if (cxl_is_memdev_memory_online(cxlmd))
+ return -EBUSY;
+ } else {
+ if (cxl_is_memdev_memory_online(cxlmd)) {
+ /* Check memory to repair is from the current boot */
+ attrbs.repair_type = CXL_PPR;
+ attrbs.dpa = cxl_ppr_ctx->dpa;
+ attrbs.nibble_mask = cxl_ppr_ctx->nibble_mask;
+ if (!cxl_find_rec_dram(cxlmd, &attrbs) &&
+ !cxl_find_rec_gen_media(cxlmd, &attrbs))
+ return -EINVAL;
+ }
+ }
+
+ memset(&maintenance_attrbs, 0, sizeof(maintenance_attrbs));
+ maintenance_attrbs.flags = 0;
+ maintenance_attrbs.dpa = cpu_to_le64(cxl_ppr_ctx->dpa);
+ put_unaligned_le24(cxl_ppr_ctx->nibble_mask,
+ maintenance_attrbs.nibble_mask);
+
+ return cxl_perform_maintenance(&cxlmd->cxlds->cxl_mbox,
+ cxl_ppr_ctx->op_class,
+ cxl_ppr_ctx->op_subclass,
+ &maintenance_attrbs,
+ sizeof(maintenance_attrbs));
+}
+
+static int cxl_ppr_get_repair_type(struct device *dev, void *drv_data,
+ const char **repair_type)
+{
+ *repair_type = edac_repair_type[EDAC_REPAIR_PPR];
+
+ return 0;
+}
+
+static int cxl_ppr_get_persist_mode(struct device *dev, void *drv_data,
+ bool *persist_mode)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+
+ *persist_mode = cxl_ppr_ctx->persist_mode;
+
+ return 0;
+}
+
+static int cxl_get_ppr_safe_when_in_use(struct device *dev, void *drv_data,
+ bool *safe)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+
+	*safe = cxl_ppr_ctx->media_accessible && cxl_ppr_ctx->data_retained;
+
+ return 0;
+}
+
+static int cxl_ppr_get_min_dpa(struct device *dev, void *drv_data, u64 *min_dpa)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+ struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+ *min_dpa = cxlds->dpa_res.start;
+
+ return 0;
+}
+
+static int cxl_ppr_get_max_dpa(struct device *dev, void *drv_data, u64 *max_dpa)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+ struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+ *max_dpa = cxlds->dpa_res.end;
+
+ return 0;
+}
+
+static int cxl_ppr_get_dpa(struct device *dev, void *drv_data, u64 *dpa)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+
+ *dpa = cxl_ppr_ctx->dpa;
+
+ return 0;
+}
+
+static int cxl_ppr_set_dpa(struct device *dev, void *drv_data, u64 dpa)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+ struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+ if (dpa < cxlds->dpa_res.start || dpa > cxlds->dpa_res.end)
+ return -EINVAL;
+
+ cxl_ppr_ctx->dpa = dpa;
+
+ return 0;
+}
+
+static int cxl_ppr_get_nibble_mask(struct device *dev, void *drv_data,
+ u32 *nibble_mask)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+
+ *nibble_mask = cxl_ppr_ctx->nibble_mask;
+
+ return 0;
+}
+
+static int cxl_ppr_set_nibble_mask(struct device *dev, void *drv_data,
+ u32 nibble_mask)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+
+ cxl_ppr_ctx->nibble_mask = nibble_mask;
+
+ return 0;
+}
+
+static int cxl_do_ppr(struct device *dev, void *drv_data, u32 val)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+
+ if (!cxl_ppr_ctx->dpa || val != EDAC_DO_MEM_REPAIR)
+ return -EINVAL;
+
+ return cxl_mem_perform_ppr(cxl_ppr_ctx);
+}
+
+static const struct edac_mem_repair_ops cxl_sppr_ops = {
+ .get_repair_type = cxl_ppr_get_repair_type,
+ .get_persist_mode = cxl_ppr_get_persist_mode,
+ .get_repair_safe_when_in_use = cxl_get_ppr_safe_when_in_use,
+ .get_min_dpa = cxl_ppr_get_min_dpa,
+ .get_max_dpa = cxl_ppr_get_max_dpa,
+ .get_dpa = cxl_ppr_get_dpa,
+ .set_dpa = cxl_ppr_set_dpa,
+ .get_nibble_mask = cxl_ppr_get_nibble_mask,
+ .set_nibble_mask = cxl_ppr_set_nibble_mask,
+ .do_repair = cxl_do_ppr,
+};
+
+static int cxl_memdev_soft_ppr_init(struct cxl_memdev *cxlmd,
+ struct edac_dev_feature *ras_feature,
+ u8 repair_inst)
+{
+ struct cxl_ppr_context *cxl_sppr_ctx;
+ struct cxl_feat_entry *feat_entry;
+ int ret;
+
+ feat_entry = cxl_feature_info(to_cxlfs(cxlmd->cxlds),
+ &CXL_FEAT_SPPR_UUID);
+ if (IS_ERR(feat_entry))
+ return -EOPNOTSUPP;
+
+ if (!(le32_to_cpu(feat_entry->flags) & CXL_FEATURE_F_CHANGEABLE))
+ return -EOPNOTSUPP;
+
+ cxl_sppr_ctx =
+ devm_kzalloc(&cxlmd->dev, sizeof(*cxl_sppr_ctx), GFP_KERNEL);
+ if (!cxl_sppr_ctx)
+ return -ENOMEM;
+
+ *cxl_sppr_ctx = (struct cxl_ppr_context){
+ .get_feat_size = le16_to_cpu(feat_entry->get_feat_size),
+ .set_feat_size = le16_to_cpu(feat_entry->set_feat_size),
+ .get_version = feat_entry->get_feat_ver,
+ .set_version = feat_entry->set_feat_ver,
+ .effects = le16_to_cpu(feat_entry->effects),
+ .cxlmd = cxlmd,
+ .repair_type = EDAC_REPAIR_PPR,
+ .persist_mode = 0,
+ .instance = repair_inst,
+ };
+ uuid_copy(&cxl_sppr_ctx->repair_uuid, &CXL_FEAT_SPPR_UUID);
+
+ ret = cxl_mem_ppr_get_attrbs(cxl_sppr_ctx);
+ if (ret)
+ return ret;
+
+ ras_feature->ft_type = RAS_FEAT_MEM_REPAIR;
+ ras_feature->instance = cxl_sppr_ctx->instance;
+ ras_feature->mem_repair_ops = &cxl_sppr_ops;
+ ras_feature->ctx = cxl_sppr_ctx;
+
+ return 0;
+}
+
+int devm_cxl_memdev_edac_register(struct cxl_memdev *cxlmd)
+{
+ struct edac_dev_feature ras_features[CXL_NR_EDAC_DEV_FEATURES];
+ int num_ras_features = 0;
+ u8 repair_inst = 0;
+ int rc;
+
+ if (IS_ENABLED(CONFIG_CXL_EDAC_SCRUB)) {
+ rc = cxl_memdev_scrub_init(cxlmd, &ras_features[num_ras_features], 0);
+ if (rc < 0 && rc != -EOPNOTSUPP)
+ return rc;
+
+ if (rc != -EOPNOTSUPP)
+ num_ras_features++;
+ }
+
+ if (IS_ENABLED(CONFIG_CXL_EDAC_ECS)) {
+ rc = cxl_memdev_ecs_init(cxlmd, &ras_features[num_ras_features]);
+ if (rc < 0 && rc != -EOPNOTSUPP)
+ return rc;
+
+ if (rc != -EOPNOTSUPP)
+ num_ras_features++;
+ }
+
+ if (IS_ENABLED(CONFIG_CXL_EDAC_MEM_REPAIR)) {
+ for (int i = 0; i < CXL_MEM_SPARING_MAX; i++) {
+ rc = cxl_memdev_sparing_init(cxlmd,
+ &ras_features[num_ras_features],
+ &mem_sparing_desc[i], repair_inst);
+ if (rc == -EOPNOTSUPP)
+ continue;
+ if (rc < 0)
+ return rc;
+
+ repair_inst++;
+ num_ras_features++;
+ }
+
+ rc = cxl_memdev_soft_ppr_init(cxlmd, &ras_features[num_ras_features],
+ repair_inst);
+ if (rc < 0 && rc != -EOPNOTSUPP)
+ return rc;
+
+ if (rc != -EOPNOTSUPP) {
+ repair_inst++;
+ num_ras_features++;
+ }
+
+ if (repair_inst) {
+ struct cxl_mem_err_rec *array_rec =
+ devm_kzalloc(&cxlmd->dev, sizeof(*array_rec),
+ GFP_KERNEL);
+ if (!array_rec)
+ return -ENOMEM;
+
+ xa_init(&array_rec->rec_gen_media);
+ xa_init(&array_rec->rec_dram);
+ cxlmd->err_rec_array = array_rec;
+ }
+ }
+
+ if (!num_ras_features)
+ return -EINVAL;
+
+ char *cxl_dev_name __free(kfree) =
+ kasprintf(GFP_KERNEL, "cxl_%s", dev_name(&cxlmd->dev));
+ if (!cxl_dev_name)
+ return -ENOMEM;
+
+ return edac_dev_register(&cxlmd->dev, cxl_dev_name, NULL,
+ num_ras_features, ras_features);
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_memdev_edac_register, "CXL");
+
+int devm_cxl_region_edac_register(struct cxl_region *cxlr)
+{
+ struct edac_dev_feature ras_features[CXL_NR_EDAC_DEV_FEATURES];
+ int num_ras_features = 0;
+ int rc;
+
+ if (!IS_ENABLED(CONFIG_CXL_EDAC_SCRUB))
+ return 0;
+
+ rc = cxl_region_scrub_init(cxlr, &ras_features[num_ras_features], 0);
+ if (rc < 0)
+ return rc;
+
+ num_ras_features++;
+
+ char *cxl_dev_name __free(kfree) =
+ kasprintf(GFP_KERNEL, "cxl_%s", dev_name(&cxlr->dev));
+ if (!cxl_dev_name)
+ return -ENOMEM;
+
+ return edac_dev_register(&cxlr->dev, cxl_dev_name, NULL,
+ num_ras_features, ras_features);
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_region_edac_register, "CXL");
+
+void devm_cxl_memdev_edac_release(struct cxl_memdev *cxlmd)
+{
+ struct cxl_mem_err_rec *array_rec = cxlmd->err_rec_array;
+ struct cxl_event_gen_media *rec_gen_media;
+ struct cxl_event_dram *rec_dram;
+ unsigned long index;
+
+ if (!IS_ENABLED(CONFIG_CXL_EDAC_MEM_REPAIR) || !array_rec)
+ return;
+
+ xa_for_each(&array_rec->rec_dram, index, rec_dram)
+ kfree(rec_dram);
+ xa_destroy(&array_rec->rec_dram);
+
+ xa_for_each(&array_rec->rec_gen_media, index, rec_gen_media)
+ kfree(rec_gen_media);
+ xa_destroy(&array_rec->rec_gen_media);
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_memdev_edac_release, "CXL");
diff --git a/drivers/cxl/core/features.c b/drivers/cxl/core/features.c
index 1498e2369c37..6f2eae1eb126 100644
--- a/drivers/cxl/core/features.c
+++ b/drivers/cxl/core/features.c
@@ -9,6 +9,16 @@
#include "core.h"
#include "cxlmem.h"
+/**
+ * DOC: cxl features
+ *
+ * CXL Features:
+ * A CXL device that includes a mailbox supports commands that allow
+ * listing, getting, and setting of optionally defined features such
+ * as memory sparing or post package repair (PPR). Vendors may define
+ * custom features for the device.
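+ *
+ * A minimal sketch of how a kernel-internal consumer drives a feature
+ * through these helpers (argument values are illustrative):
+ *
+ *	feat = cxl_feature_info(to_cxlfs(cxlds), &CXL_FEAT_PATROL_SCRUB_UUID);
+ *	cxl_get_feature(cxl_mbox, &feat->uuid, CXL_GET_FEAT_SEL_CURRENT_VALUE,
+ *			data, size, 0, NULL);
+ *	cxl_set_feature(cxl_mbox, &feat->uuid, feat->set_feat_ver, data,
+ *			size, flags, 0, NULL);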
+ */
+
/* All the features below are exclusive to the kernel */
static const uuid_t cxl_exclusive_feats[] = {
CXL_FEAT_PATROL_SCRUB_UUID,
@@ -36,7 +46,7 @@ static bool is_cxl_feature_exclusive(struct cxl_feat_entry *entry)
return is_cxl_feature_exclusive_by_uuid(&entry->uuid);
}
-inline struct cxl_features_state *to_cxlfs(struct cxl_dev_state *cxlds)
+struct cxl_features_state *to_cxlfs(struct cxl_dev_state *cxlds)
{
return cxlds->cxlfs;
}
@@ -355,17 +365,11 @@ static void cxlctl_close_uctx(struct fwctl_uctx *uctx)
{
}
-static struct cxl_feat_entry *
-get_support_feature_info(struct cxl_features_state *cxlfs,
- const struct fwctl_rpc_cxl *rpc_in)
+struct cxl_feat_entry *
+cxl_feature_info(struct cxl_features_state *cxlfs,
+ const uuid_t *uuid)
{
struct cxl_feat_entry *feat;
- const uuid_t *uuid;
-
- if (rpc_in->op_size < sizeof(uuid))
- return ERR_PTR(-EINVAL);
-
- uuid = &rpc_in->set_feat_in.uuid;
for (int i = 0; i < cxlfs->entries->num_features; i++) {
feat = &cxlfs->entries->ent[i];
@@ -416,14 +420,6 @@ static void *cxlctl_get_supported_features(struct cxl_features_state *cxlfs,
rpc_out->size = struct_size(feat_out, ents, requested);
feat_out = &rpc_out->get_sup_feats_out;
- if (requested == 0) {
- feat_out->num_entries = cpu_to_le16(requested);
- feat_out->supported_feats =
- cpu_to_le16(cxlfs->entries->num_features);
- rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;
- *out_len = out_size;
- return no_free_ptr(rpc_out);
- }
for (i = start, pos = &feat_out->ents[0];
i < cxlfs->entries->num_features; i++, pos++) {
@@ -547,7 +543,10 @@ static bool cxlctl_validate_set_features(struct cxl_features_state *cxlfs,
struct cxl_feat_entry *feat;
u32 flags;
- feat = get_support_feature_info(cxlfs, rpc_in);
+	if (rpc_in->op_size < sizeof(uuid_t))
+		return false;
+
+ feat = cxl_feature_info(cxlfs, &rpc_in->set_feat_in.uuid);
if (IS_ERR(feat))
return false;
@@ -614,11 +613,7 @@ static bool cxlctl_validate_hw_command(struct cxl_features_state *cxlfs,
switch (opcode) {
case CXL_MBOX_OP_GET_SUPPORTED_FEATURES:
case CXL_MBOX_OP_GET_FEATURE:
- if (cxl_mbox->feat_cap < CXL_FEATURES_RO)
- return false;
- if (scope >= FWCTL_RPC_CONFIGURATION)
- return true;
- return false;
+ return cxl_mbox->feat_cap >= CXL_FEATURES_RO;
case CXL_MBOX_OP_SET_FEATURE:
if (cxl_mbox->feat_cap < CXL_FEATURES_RW)
return false;
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index 70cae4ebf8a4..ab1007495f6b 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -34,7 +34,8 @@ static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
if (rc)
return rc;
- dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));
+ dev_dbg(port->uport_dev, "%s added to %s\n",
+ dev_name(&cxld->dev), dev_name(&port->dev));
return 0;
}
@@ -603,7 +604,7 @@ int cxl_dpa_set_part(struct cxl_endpoint_decoder *cxled,
return 0;
}
-static int __cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
+static int __cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size)
{
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
@@ -666,15 +667,15 @@ static int __cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long lon
skip = res->start - skip_start;
if (size > avail) {
- dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
- res->name, &avail);
+ dev_dbg(dev, "%llu exceeds available %s capacity: %llu\n", size,
+ res->name, (u64)avail);
return -ENOSPC;
}
return __cxl_dpa_reserve(cxled, start, size, skip);
}
-int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
+int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size)
{
struct cxl_port *port = cxled_to_port(cxled);
int rc;
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index d72764056ce6..2689e6453c5a 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -922,12 +922,19 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
hpa_alias = hpa - cache_size;
}
- if (event_type == CXL_CPER_EVENT_GEN_MEDIA)
+ if (event_type == CXL_CPER_EVENT_GEN_MEDIA) {
+ if (cxl_store_rec_gen_media((struct cxl_memdev *)cxlmd, evt))
+ dev_dbg(&cxlmd->dev, "CXL store rec_gen_media failed\n");
+
trace_cxl_general_media(cxlmd, type, cxlr, hpa,
hpa_alias, &evt->gen_media);
- else if (event_type == CXL_CPER_EVENT_DRAM)
+ } else if (event_type == CXL_CPER_EVENT_DRAM) {
+ if (cxl_store_rec_dram((struct cxl_memdev *)cxlmd, evt))
+ dev_dbg(&cxlmd->dev, "CXL store rec_dram failed\n");
+
trace_cxl_dram(cxlmd, type, cxlr, hpa, hpa_alias,
&evt->dram);
+ }
}
}
EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, "CXL");
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index a16a5886d40a..f88a13adf7fa 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -27,6 +27,7 @@ static void cxl_memdev_release(struct device *dev)
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
ida_free(&cxl_memdev_ida, cxlmd->id);
+ devm_cxl_memdev_edac_release(cxlmd);
kfree(cxlmd);
}
@@ -153,8 +154,8 @@ static ssize_t security_state_show(struct device *dev,
return sysfs_emit(buf, "frozen\n");
if (state & CXL_PMEM_SEC_STATE_LOCKED)
return sysfs_emit(buf, "locked\n");
- else
- return sysfs_emit(buf, "unlocked\n");
+
+ return sysfs_emit(buf, "unlocked\n");
}
static struct device_attribute dev_attr_security_state =
__ATTR(state, 0444, security_state_show, NULL);
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index 3b80e9a76ba8..b50551601c2e 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -415,17 +415,20 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
*/
if (global_ctrl & CXL_HDM_DECODER_ENABLE || (!hdm && info->mem_enabled))
return devm_cxl_enable_mem(&port->dev, cxlds);
- else if (!hdm)
- return -ENODEV;
- root = to_cxl_port(port->dev.parent);
- while (!is_cxl_root(root) && is_cxl_port(root->dev.parent))
- root = to_cxl_port(root->dev.parent);
- if (!is_cxl_root(root)) {
- dev_err(dev, "Failed to acquire root port for HDM enable\n");
+ /*
+ * If the HDM Decoder Capability does not exist and DVSEC was
+ * not setup, the DVSEC based emulation cannot be used.
+ */
+ if (!hdm)
return -ENODEV;
- }
+ /* The HDM Decoder Capability exists but is globally disabled. */
+
+ /*
+ * If the DVSEC CXL Range registers are not enabled, just
+ * enable and use the HDM Decoder Capability registers.
+ */
if (!info->mem_enabled) {
rc = devm_cxl_enable_hdm(&port->dev, cxlhdm);
if (rc)
@@ -434,6 +437,26 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
return devm_cxl_enable_mem(&port->dev, cxlds);
}
+ /*
+ * Per CXL 2.0 Section 8.1.3.8.3 and 8.1.3.8.4 DVSEC CXL Range 1 Base
+ * [High,Low] when HDM operation is enabled the range register values
+ * are ignored by the device, but the spec also recommends matching the
+ * DVSEC Range 1,2 to HDM Decoder Range 0,1. So, non-zero info->ranges
+ * are expected even though Linux does not require or maintain that
+ * match. Check if at least one DVSEC range is enabled and allowed by
+ * the platform. That is, the DVSEC range must be covered by a locked
+ * platform window (CFMWS). Fail otherwise as the endpoint's decoders
+ * cannot be used.
+ */
+
+ root = to_cxl_port(port->dev.parent);
+ while (!is_cxl_root(root) && is_cxl_port(root->dev.parent))
+ root = to_cxl_port(root->dev.parent);
+ if (!is_cxl_root(root)) {
+ dev_err(dev, "Failed to acquire root port for HDM enable\n");
+ return -ENODEV;
+ }
+
for (i = 0, allowed = 0; i < info->ranges; i++) {
struct device *cxld_dev;
@@ -453,15 +476,6 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
return -ENXIO;
}
- /*
- * Per CXL 2.0 Section 8.1.3.8.3 and 8.1.3.8.4 DVSEC CXL Range 1 Base
- * [High,Low] when HDM operation is enabled the range register values
- * are ignored by the device, but the spec also recommends matching the
- * DVSEC Range 1,2 to HDM Decoder Range 0,1. So, non-zero info->ranges
- * are expected even though Linux does not require or maintain that
- * match. If at least one DVSEC range is enabled and allowed, skip HDM
- * Decoder Capability Enable.
- */
return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, "CXL");
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index 726bd4a7de27..eb46c6764d20 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -602,17 +602,19 @@ struct cxl_port *to_cxl_port(const struct device *dev)
}
EXPORT_SYMBOL_NS_GPL(to_cxl_port, "CXL");
+struct cxl_port *parent_port_of(struct cxl_port *port)
+{
+ if (!port || !port->parent_dport)
+ return NULL;
+ return port->parent_dport->port;
+}
+
static void unregister_port(void *_port)
{
struct cxl_port *port = _port;
- struct cxl_port *parent;
+ struct cxl_port *parent = parent_port_of(port);
struct device *lock_dev;
- if (is_cxl_root(port))
- parent = NULL;
- else
- parent = to_cxl_port(port->dev.parent);
-
/*
* CXL root port's and the first level of ports are unregistered
* under the platform firmware device lock, all other ports are
@@ -1035,15 +1037,6 @@ struct cxl_root *find_cxl_root(struct cxl_port *port)
}
EXPORT_SYMBOL_NS_GPL(find_cxl_root, "CXL");
-void put_cxl_root(struct cxl_root *cxl_root)
-{
- if (!cxl_root)
- return;
-
- put_device(&cxl_root->port.dev);
-}
-EXPORT_SYMBOL_NS_GPL(put_cxl_root, "CXL");
-
static struct cxl_dport *find_dport(struct cxl_port *port, int id)
{
struct cxl_dport *dport;
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index c3f4dc244df7..6e5e1460068d 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -231,11 +231,10 @@ static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
&cxlr->dev,
"Bypassing cpu_cache_invalidate_memregion() for testing!\n");
return 0;
- } else {
- dev_WARN(&cxlr->dev,
- "Failed to synchronize CPU cache state\n");
- return -ENXIO;
}
+ dev_WARN(&cxlr->dev,
+ "Failed to synchronize CPU cache state\n");
+ return -ENXIO;
}
cpu_cache_invalidate_memregion(IORES_DESC_CXL);
@@ -865,10 +864,23 @@ static int match_auto_decoder(struct device *dev, const void *data)
return 0;
}
+/**
+ * cxl_port_pick_region_decoder() - assign or lookup a decoder for a region
+ * @port: a port in the ancestry of the endpoint implied by @cxled
+ * @cxled: endpoint decoder to be, or currently, mapped by @port
+ * @cxlr: region whose decode @port is establishing or validating
+ *
+ * In the region creation path cxl_port_pick_region_decoder() is an
+ * allocator that finds a free decoder. In the region assembly path, it
+ * recalls the decoder that platform firmware picked, for validation
+ * purposes.
+ *
+ * The result is recorded in a 'struct cxl_region_ref' in @port.
+ */
static struct cxl_decoder *
-cxl_region_find_decoder(struct cxl_port *port,
- struct cxl_endpoint_decoder *cxled,
- struct cxl_region *cxlr)
+cxl_port_pick_region_decoder(struct cxl_port *port,
+ struct cxl_endpoint_decoder *cxled,
+ struct cxl_region *cxlr)
{
struct device *dev;
@@ -916,7 +928,8 @@ static bool auto_order_ok(struct cxl_port *port, struct cxl_region *cxlr_iter,
static struct cxl_region_ref *
alloc_region_ref(struct cxl_port *port, struct cxl_region *cxlr,
- struct cxl_endpoint_decoder *cxled)
+ struct cxl_endpoint_decoder *cxled,
+ struct cxl_decoder *cxld)
{
struct cxl_region_params *p = &cxlr->params;
struct cxl_region_ref *cxl_rr, *iter;
@@ -930,9 +943,6 @@ alloc_region_ref(struct cxl_port *port, struct cxl_region *cxlr,
continue;
if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
- struct cxl_decoder *cxld;
-
- cxld = cxl_region_find_decoder(port, cxled, cxlr);
if (auto_order_ok(port, iter->region, cxld))
continue;
}
@@ -1014,19 +1024,11 @@ static int cxl_rr_ep_add(struct cxl_region_ref *cxl_rr,
return 0;
}
-static int cxl_rr_alloc_decoder(struct cxl_port *port, struct cxl_region *cxlr,
- struct cxl_endpoint_decoder *cxled,
- struct cxl_region_ref *cxl_rr)
+static int cxl_rr_assign_decoder(struct cxl_port *port, struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled,
+ struct cxl_region_ref *cxl_rr,
+ struct cxl_decoder *cxld)
{
- struct cxl_decoder *cxld;
-
- cxld = cxl_region_find_decoder(port, cxled, cxlr);
- if (!cxld) {
- dev_dbg(&cxlr->dev, "%s: no decoder available\n",
- dev_name(&port->dev));
- return -EBUSY;
- }
-
if (cxld->region) {
dev_dbg(&cxlr->dev, "%s: %s already attached to %s\n",
dev_name(&port->dev), dev_name(&cxld->dev),
@@ -1117,7 +1119,16 @@ static int cxl_port_attach_region(struct cxl_port *port,
nr_targets_inc = true;
}
} else {
- cxl_rr = alloc_region_ref(port, cxlr, cxled);
+ struct cxl_decoder *cxld;
+
+ cxld = cxl_port_pick_region_decoder(port, cxled, cxlr);
+ if (!cxld) {
+ dev_dbg(&cxlr->dev, "%s: no decoder available\n",
+ dev_name(&port->dev));
+ return -EBUSY;
+ }
+
+ cxl_rr = alloc_region_ref(port, cxlr, cxled, cxld);
if (IS_ERR(cxl_rr)) {
dev_dbg(&cxlr->dev,
"%s: failed to allocate region reference\n",
@@ -1126,7 +1137,7 @@ static int cxl_port_attach_region(struct cxl_port *port,
}
nr_targets_inc = true;
- rc = cxl_rr_alloc_decoder(port, cxlr, cxled, cxl_rr);
+ rc = cxl_rr_assign_decoder(port, cxlr, cxled, cxl_rr, cxld);
if (rc)
goto out_erase;
}
@@ -1446,7 +1457,7 @@ static int cxl_port_setup_targets(struct cxl_port *port,
if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
if (cxld->interleave_ways != iw ||
- cxld->interleave_granularity != ig ||
+ (iw > 1 && cxld->interleave_granularity != ig) ||
!region_res_match_cxl_range(p, &cxld->hpa_range) ||
((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
dev_err(&cxlr->dev,
@@ -1748,13 +1759,6 @@ static int cmp_interleave_pos(const void *a, const void *b)
return cxled_a->pos - cxled_b->pos;
}
-static struct cxl_port *next_port(struct cxl_port *port)
-{
- if (!port->parent_dport)
- return NULL;
- return port->parent_dport->port;
-}
-
static int match_switch_decoder_by_range(struct device *dev,
const void *data)
{
@@ -1781,7 +1785,7 @@ static int find_pos_and_ways(struct cxl_port *port, struct range *range,
struct device *dev;
int rc = -ENXIO;
- parent = next_port(port);
+ parent = parent_port_of(port);
if (!parent)
return rc;
@@ -1805,6 +1809,13 @@ static int find_pos_and_ways(struct cxl_port *port, struct range *range,
}
put_device(dev);
+ if (rc)
+ dev_err(port->uport_dev,
+ "failed to find %s:%s in target list of %s\n",
+ dev_name(&port->dev),
+ dev_name(port->parent_dport->dport_dev),
+ dev_name(&cxlsd->cxld.dev));
+
return rc;
}
@@ -1861,7 +1872,7 @@ static int cxl_calc_interleave_pos(struct cxl_endpoint_decoder *cxled)
*/
/* Iterate from endpoint to root_port refining the position */
- for (iter = port; iter; iter = next_port(iter)) {
+ for (iter = port; iter; iter = parent_port_of(iter)) {
if (is_cxl_root(iter))
break;
@@ -1940,7 +1951,9 @@ static int cxl_region_attach(struct cxl_region *cxlr,
if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) {
dev_dbg(&cxlr->dev, "region already active\n");
return -EBUSY;
- } else if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) {
+ }
+
+ if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) {
dev_dbg(&cxlr->dev, "interleave config missing\n");
return -ENXIO;
}
@@ -2160,6 +2173,12 @@ static int attach_target(struct cxl_region *cxlr,
rc = cxl_region_attach(cxlr, cxled, pos);
up_read(&cxl_dpa_rwsem);
up_write(&cxl_region_rwsem);
+
+ if (rc)
+ dev_warn(cxled->cxld.dev.parent,
+ "failed to attach %s to %s: %d\n",
+ dev_name(&cxled->cxld.dev), dev_name(&cxlr->dev), rc);
+
return rc;
}
@@ -3196,20 +3215,49 @@ err:
return rc;
}
-static int match_root_decoder_by_range(struct device *dev,
- const void *data)
+static int match_decoder_by_range(struct device *dev, const void *data)
{
const struct range *r1, *r2 = data;
- struct cxl_root_decoder *cxlrd;
+ struct cxl_decoder *cxld;
- if (!is_root_decoder(dev))
+ if (!is_switch_decoder(dev))
return 0;
- cxlrd = to_cxl_root_decoder(dev);
- r1 = &cxlrd->cxlsd.cxld.hpa_range;
+ cxld = to_cxl_decoder(dev);
+ r1 = &cxld->hpa_range;
return range_contains(r1, r2);
}
+static struct cxl_decoder *
+cxl_port_find_switch_decoder(struct cxl_port *port, struct range *hpa)
+{
+ struct device *cxld_dev = device_find_child(&port->dev, hpa,
+ match_decoder_by_range);
+
+ return cxld_dev ? to_cxl_decoder(cxld_dev) : NULL;
+}
+
+static struct cxl_root_decoder *
+cxl_find_root_decoder(struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_port *port = cxled_to_port(cxled);
+ struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);
+ struct cxl_decoder *root, *cxld = &cxled->cxld;
+ struct range *hpa = &cxld->hpa_range;
+
+ root = cxl_port_find_switch_decoder(&cxl_root->port, hpa);
+ if (!root) {
+ dev_err(cxlmd->dev.parent,
+ "%s:%s no CXL window for range %#llx:%#llx\n",
+ dev_name(&cxlmd->dev), dev_name(&cxld->dev),
+ cxld->hpa_range.start, cxld->hpa_range.end);
+ return NULL;
+ }
+
+ return to_cxl_root_decoder(&root->dev);
+}
+
static int match_region_by_range(struct device *dev, const void *data)
{
struct cxl_region_params *p;
@@ -3376,47 +3424,45 @@ static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
return cxlr;
}
-int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled)
+static struct cxl_region *
+cxl_find_region_by_range(struct cxl_root_decoder *cxlrd, struct range *hpa)
+{
+ struct device *region_dev;
+
+ region_dev = device_find_child(&cxlrd->cxlsd.cxld.dev, hpa,
+ match_region_by_range);
+ if (!region_dev)
+ return NULL;
+
+ return to_cxl_region(region_dev);
+}
+
+int cxl_add_to_region(struct cxl_endpoint_decoder *cxled)
{
- struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
struct range *hpa = &cxled->cxld.hpa_range;
- struct cxl_decoder *cxld = &cxled->cxld;
- struct device *cxlrd_dev, *region_dev;
- struct cxl_root_decoder *cxlrd;
struct cxl_region_params *p;
- struct cxl_region *cxlr;
bool attach = false;
int rc;
- cxlrd_dev = device_find_child(&root->dev, &cxld->hpa_range,
- match_root_decoder_by_range);
- if (!cxlrd_dev) {
- dev_err(cxlmd->dev.parent,
- "%s:%s no CXL window for range %#llx:%#llx\n",
- dev_name(&cxlmd->dev), dev_name(&cxld->dev),
- cxld->hpa_range.start, cxld->hpa_range.end);
+ struct cxl_root_decoder *cxlrd __free(put_cxl_root_decoder) =
+ cxl_find_root_decoder(cxled);
+ if (!cxlrd)
return -ENXIO;
- }
-
- cxlrd = to_cxl_root_decoder(cxlrd_dev);
/*
* Ensure that if multiple threads race to construct_region() for @hpa
* one does the construction and the others add to that.
*/
mutex_lock(&cxlrd->range_lock);
- region_dev = device_find_child(&cxlrd->cxlsd.cxld.dev, hpa,
- match_region_by_range);
- if (!region_dev) {
+ struct cxl_region *cxlr __free(put_cxl_region) =
+ cxl_find_region_by_range(cxlrd, hpa);
+ if (!cxlr)
cxlr = construct_region(cxlrd, cxled);
- region_dev = &cxlr->dev;
- } else
- cxlr = to_cxl_region(region_dev);
mutex_unlock(&cxlrd->range_lock);
rc = PTR_ERR_OR_ZERO(cxlr);
if (rc)
- goto out;
+ return rc;
attach_target(cxlr, cxled, -1, TASK_UNINTERRUPTIBLE);
@@ -3436,9 +3482,6 @@ int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled)
p->res);
}
- put_device(region_dev);
-out:
- put_device(cxlrd_dev);
return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_add_to_region, "CXL");
@@ -3537,8 +3580,18 @@ out:
switch (cxlr->mode) {
case CXL_PARTMODE_PMEM:
+ rc = devm_cxl_region_edac_register(cxlr);
+ if (rc)
+ dev_dbg(&cxlr->dev, "CXL EDAC registration for region_id=%d failed\n",
+ cxlr->id);
+
return devm_cxl_add_pmem_region(cxlr);
case CXL_PARTMODE_RAM:
+ rc = devm_cxl_region_edac_register(cxlr);
+ if (rc)
+ dev_dbg(&cxlr->dev, "CXL EDAC registration for region_id=%d failed\n",
+ cxlr->id);
+
/*
* The region cannot be managed by CXL if any portion of
* it is already online as 'System RAM'
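
As a usage sketch: parent_port_of() generalizes the old next_port() for walks
from an endpoint toward the root, as in cxl_calc_interleave_pos() above. A
hypothetical depth counter (demo_port_depth is not part of this patch) shows
the shape of such a walk:

	static int demo_port_depth(struct cxl_port *port)
	{
		int depth = 0;

		/* parent_port_of() returns NULL at the root, ending the walk */
		for (struct cxl_port *iter = port; iter; iter = parent_port_of(iter))
			depth++;

		return depth;
	}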
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index a9ab46eb0610..3f1695c96abc 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -724,6 +724,7 @@ static inline bool is_cxl_root(struct cxl_port *port)
int cxl_num_decoders_committed(struct cxl_port *port);
bool is_cxl_port(const struct device *dev);
struct cxl_port *to_cxl_port(const struct device *dev);
+struct cxl_port *parent_port_of(struct cxl_port *port);
void cxl_port_commit_reap(struct cxl_decoder *cxld);
struct pci_bus;
int devm_cxl_register_pci_bus(struct device *host, struct device *uport_dev,
@@ -736,10 +737,12 @@ struct cxl_port *devm_cxl_add_port(struct device *host,
struct cxl_root *devm_cxl_add_root(struct device *host,
const struct cxl_root_ops *ops);
struct cxl_root *find_cxl_root(struct cxl_port *port);
-void put_cxl_root(struct cxl_root *cxl_root);
-DEFINE_FREE(put_cxl_root, struct cxl_root *, if (_T) put_cxl_root(_T))
+DEFINE_FREE(put_cxl_root, struct cxl_root *, if (_T) put_device(&_T->port.dev))
DEFINE_FREE(put_cxl_port, struct cxl_port *, if (!IS_ERR_OR_NULL(_T)) put_device(&_T->dev))
+DEFINE_FREE(put_cxl_root_decoder, struct cxl_root_decoder *, if (!IS_ERR_OR_NULL(_T)) put_device(&_T->cxlsd.cxld.dev))
+DEFINE_FREE(put_cxl_region, struct cxl_region *, if (!IS_ERR_OR_NULL(_T)) put_device(&_T->dev))
+
int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd);
void cxl_bus_rescan(void);
void cxl_bus_drain(void);
@@ -856,8 +859,7 @@ struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_port *port);
#ifdef CONFIG_CXL_REGION
bool is_cxl_pmem_region(struct device *dev);
struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev);
-int cxl_add_to_region(struct cxl_port *root,
- struct cxl_endpoint_decoder *cxled);
+int cxl_add_to_region(struct cxl_endpoint_decoder *cxled);
struct cxl_dax_region *to_cxl_dax_region(struct device *dev);
u64 cxl_port_get_spa_cache_alias(struct cxl_port *endpoint, u64 spa);
#else
@@ -869,8 +871,7 @@ static inline struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
{
return NULL;
}
-static inline int cxl_add_to_region(struct cxl_port *root,
- struct cxl_endpoint_decoder *cxled)
+static inline int cxl_add_to_region(struct cxl_endpoint_decoder *cxled)
{
return 0;
}
@@ -912,4 +913,14 @@ bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port);
u16 cxl_gpf_get_dvsec(struct device *dev);
+static inline struct rw_semaphore *rwsem_read_intr_acquire(struct rw_semaphore *rwsem)
+{
+ if (down_read_interruptible(rwsem))
+ return NULL;
+
+ return rwsem;
+}
+
+DEFINE_FREE(rwsem_read_release, struct rw_semaphore *, if (_T) up_read(_T))
+
#endif /* __CXL_H__ */
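
A minimal usage sketch of the scope-based helpers above, assuming only the
declarations in this header plus the existing cxl_region_rwsem lock (the
function name demo_region_state is hypothetical):

	static int demo_region_state(struct cxl_region *cxlr)
	{
		struct rw_semaphore *rwsem __free(rwsem_read_release) =
			rwsem_read_intr_acquire(&cxl_region_rwsem);

		if (!rwsem)
			return -EINTR;

		/* read-side critical section; up_read() runs at scope exit */
		return cxlr->params.state;
	}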
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 3ec6b906371b..551b0ba2caa1 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -45,6 +45,11 @@
* @endpoint: connection to the CXL port topology for this memory device
* @id: id number of this memdev instance.
* @depth: endpoint port depth
+ * @scrub_cycle: current scrub cycle set for this device
+ * @scrub_region_id: id number of the backed region (if any) for which the current scrub cycle is set
+ * @err_rec_array: list of xarrays that store the memdev error records used to
+ * check that the attributes for a memory repair operation are from the
+ * current boot.
*/
struct cxl_memdev {
struct device dev;
@@ -56,6 +61,9 @@ struct cxl_memdev {
struct cxl_port *endpoint;
int id;
int depth;
+ u8 scrub_cycle;
+ int scrub_region_id;
+ void *err_rec_array;
};
static inline struct cxl_memdev *to_cxl_memdev(struct device *dev)
@@ -527,6 +535,7 @@ enum cxl_opcode {
CXL_MBOX_OP_GET_SUPPORTED_FEATURES = 0x0500,
CXL_MBOX_OP_GET_FEATURE = 0x0501,
CXL_MBOX_OP_SET_FEATURE = 0x0502,
+ CXL_MBOX_OP_DO_MAINTENANCE = 0x0600,
CXL_MBOX_OP_IDENTIFY = 0x4000,
CXL_MBOX_OP_GET_PARTITION_INFO = 0x4100,
CXL_MBOX_OP_SET_PARTITION_INFO = 0x4101,
@@ -853,6 +862,27 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd);
int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa);
int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa);
+#ifdef CONFIG_CXL_EDAC_MEM_FEATURES
+int devm_cxl_memdev_edac_register(struct cxl_memdev *cxlmd);
+int devm_cxl_region_edac_register(struct cxl_region *cxlr);
+int cxl_store_rec_gen_media(struct cxl_memdev *cxlmd, union cxl_event *evt);
+int cxl_store_rec_dram(struct cxl_memdev *cxlmd, union cxl_event *evt);
+void devm_cxl_memdev_edac_release(struct cxl_memdev *cxlmd);
+#else
+static inline int devm_cxl_memdev_edac_register(struct cxl_memdev *cxlmd)
+{ return 0; }
+static inline int devm_cxl_region_edac_register(struct cxl_region *cxlr)
+{ return 0; }
+static inline int cxl_store_rec_gen_media(struct cxl_memdev *cxlmd,
+ union cxl_event *evt)
+{ return 0; }
+static inline int cxl_store_rec_dram(struct cxl_memdev *cxlmd,
+ union cxl_event *evt)
+{ return 0; }
+static inline void devm_cxl_memdev_edac_release(struct cxl_memdev *cxlmd)
+{ return; }
+#endif
+
#ifdef CONFIG_CXL_SUSPEND
void cxl_mem_active_inc(void);
void cxl_mem_active_dec(void);
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index 9675243bd05b..6e6777b7bafb 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -180,6 +180,10 @@ static int cxl_mem_probe(struct device *dev)
return rc;
}
+ rc = devm_cxl_memdev_edac_register(cxlmd);
+ if (rc)
+ dev_dbg(dev, "CXL memdev EDAC registration failed rc=%d\n", rc);
+
/*
* The kernel may be operating out of CXL memory on this device,
* there is no spec defined way to determine whether this device
diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c
index a35fc5552845..fe4b593331da 100644
--- a/drivers/cxl/port.c
+++ b/drivers/cxl/port.c
@@ -30,7 +30,7 @@ static void schedule_detach(void *cxlmd)
schedule_cxl_memdev_detach(cxlmd);
}
-static int discover_region(struct device *dev, void *root)
+static int discover_region(struct device *dev, void *unused)
{
struct cxl_endpoint_decoder *cxled;
int rc;
@@ -49,7 +49,7 @@ static int discover_region(struct device *dev, void *root)
* Region enumeration is opportunistic, if this add-event fails,
* continue to the next endpoint decoder.
*/
- rc = cxl_add_to_region(root, cxled);
+ rc = cxl_add_to_region(cxled);
if (rc)
dev_dbg(dev, "failed to add to region: %#llx-%#llx\n",
cxled->cxld.hpa_range.start, cxled->cxld.hpa_range.end);
@@ -95,7 +95,6 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_hdm *cxlhdm;
- struct cxl_port *root;
int rc;
rc = cxl_dvsec_rr_decode(cxlds, &info);
@@ -127,18 +126,10 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
return rc;
/*
- * This can't fail in practice as CXL root exit unregisters all
- * descendant ports and that in turn synchronizes with cxl_port_probe()
- */
- struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);
-
- root = &cxl_root->port;
-
- /*
* Now that all endpoint decoders are successfully enumerated, try to
* assemble regions from committed decoders
*/
- device_for_each_child(&port->dev, root, discover_region);
+ device_for_each_child(&port->dev, NULL, discover_region);
return 0;
}
diff --git a/drivers/edac/mem_repair.c b/drivers/edac/mem_repair.c
index 3b1a845457b0..d1a8caa85369 100755
--- a/drivers/edac/mem_repair.c
+++ b/drivers/edac/mem_repair.c
@@ -45,6 +45,15 @@ struct edac_mem_repair_context {
struct attribute_group group;
};
+const char * const edac_repair_type[] = {
+ [EDAC_REPAIR_PPR] = "ppr",
+ [EDAC_REPAIR_CACHELINE_SPARING] = "cacheline-sparing",
+ [EDAC_REPAIR_ROW_SPARING] = "row-sparing",
+ [EDAC_REPAIR_BANK_SPARING] = "bank-sparing",
+ [EDAC_REPAIR_RANK_SPARING] = "rank-sparing",
+};
+EXPORT_SYMBOL_GPL(edac_repair_type);
+
#define TO_MR_DEV_ATTR(_dev_attr) \
container_of(_dev_attr, struct edac_mem_repair_dev_attr, dev_attr)
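
The exported table above lets other EDAC code translate a repair-type enum
into its stable sysfs string; a hedged sketch (the enum type name is assumed
from include/linux/edac.h):

	static const char *demo_repair_type_name(enum edac_mem_repair_type type)
	{
		return edac_repair_type[type];	/* e.g. "row-sparing" */
	}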
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 905c82e26ce7..a5f5e250223a 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -83,7 +83,7 @@ config FIREWIRE_KUNIT_SELF_ID_SEQUENCE_HELPER_TEST
config FIREWIRE_OHCI
tristate "OHCI-1394 controllers"
- depends on PCI && FIREWIRE && MMU
+ depends on PCI && FIREWIRE
help
Enable this driver if you have a FireWire controller based
on the OHCI specification. For all practical purposes, this
diff --git a/drivers/firmware/smccc/kvm_guest.c b/drivers/firmware/smccc/kvm_guest.c
index a123c05cbc9e..49e1de83d2e8 100644
--- a/drivers/firmware/smccc/kvm_guest.c
+++ b/drivers/firmware/smccc/kvm_guest.c
@@ -17,17 +17,11 @@ static DECLARE_BITMAP(__kvm_arm_hyp_services, ARM_SMCCC_KVM_NUM_FUNCS) __ro_afte
void __init kvm_init_hyp_services(void)
{
+ uuid_t kvm_uuid = ARM_SMCCC_VENDOR_HYP_UID_KVM;
struct arm_smccc_res res;
u32 val[4];
- if (arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_HVC)
- return;
-
- arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, &res);
- if (res.a0 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0 ||
- res.a1 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1 ||
- res.a2 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2 ||
- res.a3 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3)
+ if (!arm_smccc_hypervisor_has_uuid(&kvm_uuid))
return;
memset(&res, 0, sizeof(res));
diff --git a/drivers/firmware/smccc/smccc.c b/drivers/firmware/smccc/smccc.c
index a74600d9f2d7..cd65b434dc6e 100644
--- a/drivers/firmware/smccc/smccc.c
+++ b/drivers/firmware/smccc/smccc.c
@@ -67,6 +67,23 @@ s32 arm_smccc_get_soc_id_revision(void)
}
EXPORT_SYMBOL_GPL(arm_smccc_get_soc_id_revision);
+bool arm_smccc_hypervisor_has_uuid(const uuid_t *hyp_uuid)
+{
+ struct arm_smccc_res res = {};
+ uuid_t uuid;
+
+ if (arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_HVC)
+ return false;
+
+ arm_smccc_1_1_hvc(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, &res);
+ if (res.a0 == SMCCC_RET_NOT_SUPPORTED)
+ return false;
+
+ uuid = smccc_res_to_uuid(res.a0, res.a1, res.a2, res.a3);
+ return uuid_equal(&uuid, hyp_uuid);
+}
+EXPORT_SYMBOL_GPL(arm_smccc_hypervisor_has_uuid);
+
static int __init smccc_devices_init(void)
{
struct platform_device *pdev;
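
For guests probing a hypervisor other than KVM, the new helper reduces
detection to a single call. A hedged sketch (MY_HYP_UUID stands in for a
vendor UUID_INIT() constant and is not defined by this patch):

	static bool demo_running_under_my_hyp(void)
	{
		uuid_t uuid = MY_HYP_UUID;	/* hypothetical vendor UUID */

		/* false if the conduit is not HVC or the UUIDs differ */
		return arm_smccc_hypervisor_has_uuid(&uuid);
	}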
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index f094797f3b2b..f7ea8e895c0c 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -188,6 +188,7 @@ source "drivers/gpu/drm/display/Kconfig"
config DRM_TTM
tristate
depends on DRM && MMU
+ select SHMEM
help
GPU memory management subsystem for devices with multiple
GPU memory types. Will be enabled automatically if a device driver
@@ -397,7 +398,7 @@ source "drivers/gpu/drm/imagination/Kconfig"
config DRM_HYPERV
tristate "DRM Support for Hyper-V synthetic video device"
- depends on DRM && PCI && MMU && HYPERV
+ depends on DRM && PCI && HYPERV
select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 1a11cab741ac..058e3b3ad520 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -2,7 +2,7 @@
config DRM_AMDGPU
tristate "AMD GPU"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI
depends on !UML
select FW_LOADER
select DRM_CLIENT
@@ -68,7 +68,6 @@ config DRM_AMDGPU_CIK
config DRM_AMDGPU_USERPTR
bool "Always enable userptr write support"
depends on DRM_AMDGPU
- depends on MMU
select HMM_MIRROR
select MMU_NOTIFIER
help
diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig
index da0663542e8a..242fbccdf844 100644
--- a/drivers/gpu/drm/ast/Kconfig
+++ b/drivers/gpu/drm/ast/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_AST
tristate "AST server chips"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI
select DRM_CLIENT_SELECTION
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index aa2ea128aa2f..a2acaa699dd5 100644
--- a/drivers/gpu/drm/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_GMA500
tristate "Intel GMA500/600/3600/3650 KMS Framebuffer"
- depends on DRM && PCI && X86 && MMU && HAS_IOPORT
+ depends on DRM && PCI && X86 && HAS_IOPORT
select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select FB_IOMEM_HELPERS if DRM_FBDEV_EMULATION
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
index 98d77d74999d..d1f3f5793f34 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig
+++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
@@ -2,7 +2,6 @@
config DRM_HISI_HIBMC
tristate "DRM Support for Hisilicon Hibmc"
depends on DRM && PCI
- depends on MMU
select DRM_CLIENT_SELECTION
select DRM_DISPLAY_HELPER
select DRM_DISPLAY_DP_HELPER
diff --git a/drivers/gpu/drm/i915/i915_reg_defs.h b/drivers/gpu/drm/i915/i915_reg_defs.h
index 94a8f902689e..bfe98cb9a038 100644
--- a/drivers/gpu/drm/i915/i915_reg_defs.h
+++ b/drivers/gpu/drm/i915/i915_reg_defs.h
@@ -9,76 +9,19 @@
#include <linux/bitfield.h>
#include <linux/bits.h>
-/**
- * REG_BIT() - Prepare a u32 bit value
- * @__n: 0-based bit number
- *
- * Local wrapper for BIT() to force u32, with compile time checks.
- *
- * @return: Value with bit @__n set.
- */
-#define REG_BIT(__n) \
- ((u32)(BIT(__n) + \
- BUILD_BUG_ON_ZERO(__is_constexpr(__n) && \
- ((__n) < 0 || (__n) > 31))))
-
-/**
- * REG_BIT8() - Prepare a u8 bit value
- * @__n: 0-based bit number
- *
- * Local wrapper for BIT() to force u8, with compile time checks.
- *
- * @return: Value with bit @__n set.
- */
-#define REG_BIT8(__n) \
- ((u8)(BIT(__n) + \
- BUILD_BUG_ON_ZERO(__is_constexpr(__n) && \
- ((__n) < 0 || (__n) > 7))))
-
-/**
- * REG_GENMASK() - Prepare a continuous u32 bitmask
- * @__high: 0-based high bit
- * @__low: 0-based low bit
- *
- * Local wrapper for GENMASK() to force u32, with compile time checks.
- *
- * @return: Continuous bitmask from @__high to @__low, inclusive.
- */
-#define REG_GENMASK(__high, __low) \
- ((u32)(GENMASK(__high, __low) + \
- BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \
- __is_constexpr(__low) && \
- ((__low) < 0 || (__high) > 31 || (__low) > (__high)))))
-
-/**
- * REG_GENMASK64() - Prepare a continuous u64 bitmask
- * @__high: 0-based high bit
- * @__low: 0-based low bit
- *
- * Local wrapper for GENMASK_ULL() to force u64, with compile time checks.
- *
- * @return: Continuous bitmask from @__high to @__low, inclusive.
+/*
+ * Wrappers over the generic fixed width BIT_U*() and GENMASK_U*()
+ * implementations, for compatibility reasons with previous implementation.
*/
-#define REG_GENMASK64(__high, __low) \
- ((u64)(GENMASK_ULL(__high, __low) + \
- BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \
- __is_constexpr(__low) && \
- ((__low) < 0 || (__high) > 63 || (__low) > (__high)))))
+#define REG_GENMASK(high, low) GENMASK_U32(high, low)
+#define REG_GENMASK64(high, low) GENMASK_U64(high, low)
+#define REG_GENMASK16(high, low) GENMASK_U16(high, low)
+#define REG_GENMASK8(high, low) GENMASK_U8(high, low)
-/**
- * REG_GENMASK8() - Prepare a continuous u8 bitmask
- * @__high: 0-based high bit
- * @__low: 0-based low bit
- *
- * Local wrapper for GENMASK() to force u8, with compile time checks.
- *
- * @return: Continuous bitmask from @__high to @__low, inclusive.
- */
-#define REG_GENMASK8(__high, __low) \
- ((u8)(GENMASK(__high, __low) + \
- BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \
- __is_constexpr(__low) && \
- ((__low) < 0 || (__high) > 7 || (__low) > (__high)))))
+#define REG_BIT(n) BIT_U32(n)
+#define REG_BIT64(n) BIT_U64(n)
+#define REG_BIT16(n) BIT_U16(n)
+#define REG_BIT8(n) BIT_U8(n)
/*
* Local integer constant expression version of is_power_of_2().
@@ -143,35 +86,6 @@
*/
#define REG_FIELD_GET64(__mask, __val) ((u64)FIELD_GET(__mask, __val))
-/**
- * REG_BIT16() - Prepare a u16 bit value
- * @__n: 0-based bit number
- *
- * Local wrapper for BIT() to force u16, with compile time
- * checks.
- *
- * @return: Value with bit @__n set.
- */
-#define REG_BIT16(__n) \
- ((u16)(BIT(__n) + \
- BUILD_BUG_ON_ZERO(__is_constexpr(__n) && \
- ((__n) < 0 || (__n) > 15))))
-
-/**
- * REG_GENMASK16() - Prepare a continuous u8 bitmask
- * @__high: 0-based high bit
- * @__low: 0-based low bit
- *
- * Local wrapper for GENMASK() to force u16, with compile time
- * checks.
- *
- * @return: Continuous bitmask from @__high to @__low, inclusive.
- */
-#define REG_GENMASK16(__high, __low) \
- ((u16)(GENMASK(__high, __low) + \
- BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \
- __is_constexpr(__low) && \
- ((__low) < 0 || (__high) > 15 || (__low) > (__high)))))
/**
* REG_FIELD_PREP16() - Prepare a u16 bitfield value
diff --git a/drivers/gpu/drm/loongson/Kconfig b/drivers/gpu/drm/loongson/Kconfig
index 552edfec7afb..d739d51cf54c 100644
--- a/drivers/gpu/drm/loongson/Kconfig
+++ b/drivers/gpu/drm/loongson/Kconfig
@@ -2,7 +2,7 @@
config DRM_LOONGSON
tristate "DRM support for Loongson Graphics"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI
depends on LOONGARCH || MIPS || COMPILE_TEST
select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig
index 412dcbea0e2d..a962ae564a75 100644
--- a/drivers/gpu/drm/mgag200/Kconfig
+++ b/drivers/gpu/drm/mgag200/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_MGAG200
tristate "Matrox G200"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI
select DRM_CLIENT_SELECTION
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 7b3e979c51ec..d1587639ebb0 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_NOUVEAU
tristate "Nouveau (NVIDIA) cards"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI
select IOMMU_API
select FW_LOADER
select FW_CACHE if PM_SLEEP
@@ -94,7 +94,6 @@ config DRM_NOUVEAU_SVM
bool "(EXPERIMENTAL) Enable SVM (Shared Virtual Memory) support"
depends on DEVICE_PRIVATE
depends on DRM_NOUVEAU
- depends on MMU
depends on STAGING
select HMM_MIRROR
select MMU_NOTIFIER
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
index 69427eb8bed2..d8f24bcae34b 100644
--- a/drivers/gpu/drm/qxl/Kconfig
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_QXL
tristate "QXL virtual GPU"
- depends on DRM && PCI && MMU && HAS_IOPORT
+ depends on DRM && PCI && HAS_IOPORT
select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_TTM
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index f51bace9555d..c479f0c0dd5c 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -2,7 +2,7 @@
config DRM_RADEON
tristate "ATI Radeon"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI
depends on AGP || !AGP
select FW_LOADER
select DRM_CLIENT_SELECTION
diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index 6d1b3e2cb3fb..06e54694a7f2 100644
--- a/drivers/gpu/drm/tiny/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -38,7 +38,7 @@ config DRM_BOCHS
config DRM_CIRRUS_QEMU
tristate "Cirrus driver for QEMU emulated device"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI
select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index 6c3c2922ae8b..aab646b91ca9 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config DRM_VMWGFX
tristate "DRM driver for VMware Virtual GPU"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI
depends on (X86 && HYPERVISOR_GUEST) || ARM64
select DRM_CLIENT_SELECTION
select DRM_TTM
diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig
index 9bce047901b2..2169bc969ea1 100644
--- a/drivers/gpu/drm/xe/Kconfig
+++ b/drivers/gpu/drm/xe/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_XE
tristate "Intel Xe Graphics"
- depends on DRM && PCI && MMU && (m || (y && KUNIT=y))
+ depends on DRM && PCI && (m || (y && KUNIT=y))
select INTERVAL_TREE
# we need shmfs for the swappable backing store, and in particular
# the shmem_readpage() which depends upon tmpfs
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index a503252702b7..43859fc75747 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -151,6 +151,7 @@ config HID_APPLEIR
config HID_APPLETB_BL
tristate "Apple Touch Bar Backlight"
depends on BACKLIGHT_CLASS_DEVICE
+ depends on X86 || COMPILE_TEST
help
Say Y here if you want support for the backlight of Touch Bars on x86
MacBook Pros.
@@ -163,6 +164,7 @@ config HID_APPLETB_KBD
depends on USB_HID
depends on BACKLIGHT_CLASS_DEVICE
depends on INPUT
+ depends on X86 || COMPILE_TEST
select INPUT_SPARSEKMAP
select HID_APPLETB_BL
help
diff --git a/drivers/hid/hid-appletb-kbd.c b/drivers/hid/hid-appletb-kbd.c
index 029ccbaa1d12..ef51b2c06872 100644
--- a/drivers/hid/hid-appletb-kbd.c
+++ b/drivers/hid/hid-appletb-kbd.c
@@ -172,7 +172,8 @@ static void appletb_inactivity_timer(struct timer_list *t)
if (!kbd->has_dimmed) {
backlight_device_set_brightness(kbd->backlight_dev, 1);
kbd->has_dimmed = true;
- mod_timer(&kbd->inactivity_timer, jiffies + msecs_to_jiffies(appletb_tb_idle_timeout * 1000));
+ mod_timer(&kbd->inactivity_timer,
+ jiffies + secs_to_jiffies(appletb_tb_idle_timeout));
} else if (!kbd->has_turned_off) {
backlight_device_set_brightness(kbd->backlight_dev, 0);
kbd->has_turned_off = true;
@@ -188,7 +189,8 @@ static void reset_inactivity_timer(struct appletb_kbd *kbd)
kbd->has_dimmed = false;
kbd->has_turned_off = false;
}
- mod_timer(&kbd->inactivity_timer, jiffies + msecs_to_jiffies(appletb_tb_dim_timeout * 1000));
+ mod_timer(&kbd->inactivity_timer,
+ jiffies + secs_to_jiffies(appletb_tb_dim_timeout));
}
}
@@ -407,7 +409,8 @@ static int appletb_kbd_probe(struct hid_device *hdev, const struct hid_device_id
} else {
backlight_device_set_brightness(kbd->backlight_dev, 2);
timer_setup(&kbd->inactivity_timer, appletb_inactivity_timer, 0);
- mod_timer(&kbd->inactivity_timer, jiffies + msecs_to_jiffies(appletb_tb_dim_timeout * 1000));
+ mod_timer(&kbd->inactivity_timer,
+ jiffies + secs_to_jiffies(appletb_tb_dim_timeout));
}
kbd->inp_handler.event = appletb_kbd_inp_event;
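
The timer conversions above are behavior-preserving: secs_to_jiffies(n)
computes the same expiry as the open-coded msecs_to_jiffies(n * 1000) for
these small timeouts, without the millisecond intermediate. A sketch
(demo_arm_dim_timer is hypothetical):

	static void demo_arm_dim_timer(struct timer_list *t, unsigned long secs)
	{
		mod_timer(t, jiffies + secs_to_jiffies(secs));
	}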
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 4741ff626771..b348d0464314 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2396,6 +2396,9 @@ int hid_hw_open(struct hid_device *hdev)
ret = hdev->ll_driver->open(hdev);
if (ret)
hdev->ll_open_count--;
+
+ if (hdev->driver->on_hid_hw_open)
+ hdev->driver->on_hid_hw_open(hdev);
}
mutex_unlock(&hdev->ll_open_lock);
@@ -2415,8 +2418,12 @@ EXPORT_SYMBOL_GPL(hid_hw_open);
void hid_hw_close(struct hid_device *hdev)
{
mutex_lock(&hdev->ll_open_lock);
- if (!--hdev->ll_open_count)
+ if (!--hdev->ll_open_count) {
hdev->ll_driver->close(hdev);
+
+ if (hdev->driver->on_hid_hw_close)
+ hdev->driver->on_hid_hw_close(hdev);
+ }
mutex_unlock(&hdev->ll_open_lock);
}
EXPORT_SYMBOL_GPL(hid_hw_close);
diff --git a/drivers/hid/hid-corsair-void.c b/drivers/hid/hid-corsair-void.c
index afbd67aa9719..fee134a7eba3 100644
--- a/drivers/hid/hid-corsair-void.c
+++ b/drivers/hid/hid-corsair-void.c
@@ -507,7 +507,7 @@ static void corsair_void_status_work_handler(struct work_struct *work)
struct delayed_work *delayed_work;
int battery_ret;
- delayed_work = container_of(work, struct delayed_work, work);
+ delayed_work = to_delayed_work(work);
drvdata = container_of(delayed_work, struct corsair_void_drvdata,
delayed_status_work);
@@ -525,7 +525,7 @@ static void corsair_void_firmware_work_handler(struct work_struct *work)
struct delayed_work *delayed_work;
int firmware_ret;
- delayed_work = container_of(work, struct delayed_work, work);
+ delayed_work = to_delayed_work(work);
drvdata = container_of(delayed_work, struct corsair_void_drvdata,
delayed_firmware_work);
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index f4c8d981aa0a..234fa82eab07 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -17,11 +17,13 @@
*/
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/gpio/driver.h>
#include <linux/hid.h>
#include <linux/hidraw.h>
#include <linux/i2c.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/nls.h>
#include <linux/string_choices.h>
#include <linux/usb/ch9.h>
@@ -185,7 +187,7 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
u8 *buf = dev->in_out_buffer;
int ret;
- mutex_lock(&dev->lock);
+ guard(mutex)(&dev->lock);
ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
@@ -194,7 +196,7 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
hid_err(hdev, "error requesting GPIO config: %d\n", ret);
if (ret >= 0)
ret = -EIO;
- goto exit;
+ return ret;
}
buf[1] &= ~BIT(offset);
@@ -207,25 +209,19 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
hid_err(hdev, "error setting GPIO config: %d\n", ret);
if (ret >= 0)
ret = -EIO;
- goto exit;
+ return ret;
}
- ret = 0;
-
-exit:
- mutex_unlock(&dev->lock);
- return ret;
+ return 0;
}
-static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int cp2112_gpio_set_unlocked(struct cp2112_device *dev,
+ unsigned int offset, int value)
{
- struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
u8 *buf = dev->in_out_buffer;
int ret;
- mutex_lock(&dev->lock);
-
buf[0] = CP2112_GPIO_SET;
buf[1] = value ? CP2112_GPIO_ALL_GPIO_MASK : 0;
buf[2] = BIT(offset);
@@ -236,7 +232,17 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
if (ret < 0)
hid_err(hdev, "error setting GPIO values: %d\n", ret);
- mutex_unlock(&dev->lock);
+ return ret;
+}
+
+static int cp2112_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct cp2112_device *dev = gpiochip_get_data(chip);
+
+ guard(mutex)(&dev->lock);
+
+ return cp2112_gpio_set_unlocked(dev, offset, value);
}
static int cp2112_gpio_get_all(struct gpio_chip *chip)
@@ -246,23 +252,17 @@ static int cp2112_gpio_get_all(struct gpio_chip *chip)
u8 *buf = dev->in_out_buffer;
int ret;
- mutex_lock(&dev->lock);
+ guard(mutex)(&dev->lock);
ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf,
CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT,
HID_REQ_GET_REPORT);
if (ret != CP2112_GPIO_GET_LENGTH) {
hid_err(hdev, "error requesting GPIO values: %d\n", ret);
- ret = ret < 0 ? ret : -EIO;
- goto exit;
+ return ret < 0 ? ret : -EIO;
}
- ret = buf[1];
-
-exit:
- mutex_unlock(&dev->lock);
-
- return ret;
+ return buf[1];
}
static int cp2112_gpio_get(struct gpio_chip *chip, unsigned int offset)
@@ -284,14 +284,14 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
u8 *buf = dev->in_out_buffer;
int ret;
- mutex_lock(&dev->lock);
+ guard(mutex)(&dev->lock);
ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
HID_REQ_GET_REPORT);
if (ret != CP2112_GPIO_CONFIG_LENGTH) {
hid_err(hdev, "error requesting GPIO config: %d\n", ret);
- goto fail;
+ return ret < 0 ? ret : -EIO;
}
buf[1] |= 1 << offset;
@@ -302,22 +302,16 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
HID_REQ_SET_REPORT);
if (ret < 0) {
hid_err(hdev, "error setting GPIO config: %d\n", ret);
- goto fail;
+ return ret;
}
- mutex_unlock(&dev->lock);
-
/*
* Set gpio value when output direction is already set,
* as specified in AN495, Rev. 0.2, cpt. 4.4
*/
- cp2112_gpio_set(chip, offset, value);
+ cp2112_gpio_set_unlocked(dev, offset, value);
return 0;
-
-fail:
- mutex_unlock(&dev->lock);
- return ret < 0 ? ret : -EIO;
}
static int cp2112_hid_get(struct hid_device *hdev, unsigned char report_number,
@@ -1205,7 +1199,11 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (!dev->in_out_buffer)
return -ENOMEM;
- mutex_init(&dev->lock);
+ ret = devm_mutex_init(&hdev->dev, &dev->lock);
+ if (ret) {
+ hid_err(hdev, "mutex init failed\n");
+ return ret;
+ }
ret = hid_parse(hdev);
if (ret) {
@@ -1290,7 +1288,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
dev->gc.label = "cp2112_gpio";
dev->gc.direction_input = cp2112_gpio_direction_input;
dev->gc.direction_output = cp2112_gpio_direction_output;
- dev->gc.set = cp2112_gpio_set;
+ dev->gc.set_rv = cp2112_gpio_set;
dev->gc.get = cp2112_gpio_get;
dev->gc.base = -1;
dev->gc.ngpio = CP2112_GPIO_MAX_GPIO;
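
The conversions above lean on the scope-based lock guard from
<linux/cleanup.h>: guard(mutex)(&dev->lock) takes the mutex and releases it
automatically at every return, which is what lets the goto/unlock bookkeeping
be deleted. A minimal sketch of the pattern (demo_locked_read and its
callback are hypothetical):

	static int demo_locked_read(struct mutex *lock, int (*read_op)(void *buf),
				    void *buf)
	{
		guard(mutex)(lock);	/* mutex_unlock() runs at every return */

		int ret = read_op(buf);
		if (ret < 0)
			return ret;	/* no goto-unlock bookkeeping needed */

		return 0;
	}

Pairing this with devm_mutex_init(), as the probe path above does, also
removes the need for an explicit mutex destroy on driver teardown.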
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index 0fb210e40a41..9eafff0b6ea4 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -192,7 +192,7 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
goto cleanup;
input_device->report_desc_size = le16_to_cpu(
- desc->desc[0].wDescriptorLength);
+ desc->rpt_desc.wDescriptorLength);
if (input_device->report_desc_size == 0) {
input_device->dev_info_status = -EINVAL;
goto cleanup;
@@ -210,7 +210,7 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
memcpy(input_device->report_desc,
((unsigned char *)desc) + desc->bLength,
- le16_to_cpu(desc->desc[0].wDescriptorLength));
+ le16_to_cpu(desc->rpt_desc.wDescriptorLength));
/* Send the ack */
memset(&ack, 0, sizeof(struct mousevsc_prt_msg));
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 1062731315a2..e3fb4e2fe911 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -96,6 +96,7 @@
#define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304
#define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d
#define USB_DEVICE_ID_APPLE_MAGICMOUSE2 0x0269
+#define USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC 0x0323
#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD 0x030e
#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 0x0265
#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC 0x0324
diff --git a/drivers/hid/hid-kysona.c b/drivers/hid/hid-kysona.c
index d4c0406b3323..09bfe30d02cb 100644
--- a/drivers/hid/hid-kysona.c
+++ b/drivers/hid/hid-kysona.c
@@ -14,6 +14,7 @@
#define BATTERY_TIMEOUT_MS 5000
+#define ONLINE_REPORT_ID 3
#define BATTERY_REPORT_ID 4
struct kysona_drvdata {
@@ -80,11 +81,46 @@ static int kysona_battery_get_property(struct power_supply *psy,
return ret;
}
+static const char kysona_online_request[] = {
+ 0x08, ONLINE_REPORT_ID, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4a
+};
+
static const char kysona_battery_request[] = {
0x08, BATTERY_REPORT_ID, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49
};
+static int kysona_m600_fetch_online(struct hid_device *hdev)
+{
+ u8 *write_buf;
+ int ret;
+
+ /* Request online information */
+ write_buf = kmemdup(kysona_online_request, sizeof(kysona_online_request), GFP_KERNEL);
+ if (!write_buf)
+ return -ENOMEM;
+
+ ret = hid_hw_raw_request(hdev, kysona_online_request[0],
+ write_buf, sizeof(kysona_online_request),
+ HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
+ if (ret < (int)sizeof(kysona_online_request)) {
+ hid_err(hdev, "hid_hw_raw_request() failed with %d\n", ret);
+ ret = -ENODATA;
+ }
+ kfree(write_buf);
+ return ret;
+}
+
+static void kysona_fetch_online(struct hid_device *hdev)
+{
+ int ret = kysona_m600_fetch_online(hdev);
+
+ if (ret < 0)
+ hid_dbg(hdev,
+ "Online query failed (err: %d)\n", ret);
+}
+
static int kysona_m600_fetch_battery(struct hid_device *hdev)
{
u8 *write_buf;
@@ -121,6 +157,7 @@ static void kysona_battery_timer_tick(struct work_struct *work)
struct kysona_drvdata, battery_work.work);
struct hid_device *hdev = drv_data->hdev;
+ kysona_fetch_online(hdev);
kysona_fetch_battery(hdev);
schedule_delayed_work(&drv_data->battery_work,
msecs_to_jiffies(BATTERY_TIMEOUT_MS));
@@ -160,6 +197,7 @@ static int kysona_battery_probe(struct hid_device *hdev)
power_supply_powers(drv_data->battery, &hdev->dev);
INIT_DELAYED_WORK(&drv_data->battery_work, kysona_battery_timer_tick);
+ kysona_fetch_online(hdev);
kysona_fetch_battery(hdev);
schedule_delayed_work(&drv_data->battery_work,
msecs_to_jiffies(BATTERY_TIMEOUT_MS));
@@ -206,12 +244,16 @@ static int kysona_raw_event(struct hid_device *hdev,
{
struct kysona_drvdata *drv_data = hid_get_drvdata(hdev);
- if (drv_data->battery && size == sizeof(kysona_battery_request) &&
+ if (size == sizeof(kysona_online_request) &&
+ data[0] == 8 && data[1] == ONLINE_REPORT_ID) {
+ drv_data->online = data[6];
+ }
+
+ if (size == sizeof(kysona_battery_request) &&
data[0] == 8 && data[1] == BATTERY_REPORT_ID) {
drv_data->battery_capacity = data[6];
drv_data->battery_charging = data[7];
drv_data->battery_voltage = (data[8] << 8) | data[9];
- drv_data->online = true;
}
return 0;
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index c0a138f21ca4..445623dd1bd6 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -823,7 +823,7 @@ static ssize_t lg4ff_alternate_modes_show(struct device *dev, struct device_attr
for (i = 0; i < LG4FF_MODE_MAX_IDX; i++) {
if (entry->wdata.alternate_modes & BIT(i)) {
/* Print tag and full name */
- count += scnprintf(buf + count, PAGE_SIZE - count, "%s: %s",
+ count += sysfs_emit_at(buf, count, "%s: %s",
lg4ff_alternate_modes[i].tag,
!lg4ff_alternate_modes[i].product_id ? entry->wdata.real_name : lg4ff_alternate_modes[i].name);
if (count >= PAGE_SIZE - 1)
@@ -832,9 +832,9 @@ static ssize_t lg4ff_alternate_modes_show(struct device *dev, struct device_attr
/* Mark the currently active mode with an asterisk */
if (lg4ff_alternate_modes[i].product_id == entry->wdata.product_id ||
(lg4ff_alternate_modes[i].product_id == 0 && entry->wdata.product_id == entry->wdata.real_product_id))
- count += scnprintf(buf + count, PAGE_SIZE - count, " *\n");
+ count += sysfs_emit_at(buf, count, " *\n");
else
- count += scnprintf(buf + count, PAGE_SIZE - count, "\n");
+ count += sysfs_emit_at(buf, count, "\n");
if (count >= PAGE_SIZE - 1)
return count;
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index adfa329e917b..d4d91e49bbe8 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -218,7 +218,8 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
int pressure = 0;
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE ||
- input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC) {
id = (tdata[6] << 2 | tdata[5] >> 6) & 0xf;
x = (tdata[1] << 28 | tdata[0] << 20) >> 20;
y = -((tdata[2] << 24 | tdata[1] << 16) >> 20);
@@ -370,7 +371,8 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
if (report_undeciphered) {
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE ||
- input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2)
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC)
input_event(input, EV_MSC, MSC_RAW, tdata[7]);
else if (input->id.product !=
USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
@@ -497,7 +499,8 @@ static int magicmouse_raw_event(struct hid_device *hdev,
}
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE ||
- input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC) {
magicmouse_emit_buttons(msc, clicks & 3);
input_report_rel(input, REL_X, x);
input_report_rel(input, REL_Y, y);
@@ -519,7 +522,8 @@ static int magicmouse_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
struct magicmouse_sc *msc = hid_get_drvdata(hdev);
- if (msc->input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 &&
+ if ((msc->input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ msc->input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC) &&
field->report->id == MOUSE2_REPORT_ID) {
/*
* magic_mouse_raw_event has done all the work. Skip hidinput.
@@ -540,7 +544,8 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
__set_bit(EV_KEY, input->evbit);
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE ||
- input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC) {
__set_bit(BTN_LEFT, input->keybit);
__set_bit(BTN_RIGHT, input->keybit);
if (emulate_3button)
@@ -625,7 +630,8 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
* inverse of the reported Y.
*/
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE ||
- input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC) {
input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0);
input_set_abs_params(input, ABS_MT_POSITION_X,
MOUSE_MIN_X, MOUSE_MAX_X, 4, 0);
@@ -741,19 +747,25 @@ static int magicmouse_enable_multitouch(struct hid_device *hdev)
int ret;
int feature_size;
- if (hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
- hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
- if (hdev->vendor == BT_VENDOR_ID_APPLE) {
+ switch (hdev->product) {
+ case USB_DEVICE_ID_APPLE_MAGICTRACKPAD2:
+ case USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC:
+ switch (hdev->vendor) {
+ case BT_VENDOR_ID_APPLE:
feature_size = sizeof(feature_mt_trackpad2_bt);
feature = feature_mt_trackpad2_bt;
- } else { /* USB_VENDOR_ID_APPLE */
+ break;
+ default: /* USB_VENDOR_ID_APPLE */
feature_size = sizeof(feature_mt_trackpad2_usb);
feature = feature_mt_trackpad2_usb;
}
- } else if (hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
+ break;
+ case USB_DEVICE_ID_APPLE_MAGICMOUSE2:
+ case USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC:
feature_size = sizeof(feature_mt_mouse2);
feature = feature_mt_mouse2;
- } else {
+ break;
+ default:
feature_size = sizeof(feature_mt);
feature = feature_mt;
}
@@ -787,6 +799,7 @@ static int magicmouse_fetch_battery(struct hid_device *hdev)
if (!hdev->battery || hdev->vendor != USB_VENDOR_ID_APPLE ||
(hdev->product != USB_DEVICE_ID_APPLE_MAGICMOUSE2 &&
+ hdev->product != USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC &&
hdev->product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
hdev->product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC))
return -1;
@@ -857,6 +870,7 @@ static int magicmouse_probe(struct hid_device *hdev,
if (id->vendor == USB_VENDOR_ID_APPLE &&
(id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC ||
((id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) &&
hdev->type != HID_TYPE_USBMOUSE)))
@@ -868,21 +882,27 @@ static int magicmouse_probe(struct hid_device *hdev,
goto err_stop_hw;
}
- if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE)
- report = hid_register_report(hdev, HID_INPUT_REPORT,
- MOUSE_REPORT_ID, 0);
- else if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2)
- report = hid_register_report(hdev, HID_INPUT_REPORT,
- MOUSE2_REPORT_ID, 0);
- else if (id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
- id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
- if (id->vendor == BT_VENDOR_ID_APPLE)
+ switch (id->product) {
+ case USB_DEVICE_ID_APPLE_MAGICMOUSE:
+ report = hid_register_report(hdev, HID_INPUT_REPORT, MOUSE_REPORT_ID, 0);
+ break;
+ case USB_DEVICE_ID_APPLE_MAGICMOUSE2:
+ case USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC:
+ report = hid_register_report(hdev, HID_INPUT_REPORT, MOUSE2_REPORT_ID, 0);
+ break;
+ case USB_DEVICE_ID_APPLE_MAGICTRACKPAD2:
+ case USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC:
+ switch (id->vendor) {
+ case BT_VENDOR_ID_APPLE:
report = hid_register_report(hdev, HID_INPUT_REPORT,
TRACKPAD2_BT_REPORT_ID, 0);
- else /* USB_VENDOR_ID_APPLE */
+ break;
+ default:
report = hid_register_report(hdev, HID_INPUT_REPORT,
TRACKPAD2_USB_REPORT_ID, 0);
- } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
+ }
+ break;
+ default: /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
report = hid_register_report(hdev, HID_INPUT_REPORT,
TRACKPAD_REPORT_ID, 0);
report = hid_register_report(hdev, HID_INPUT_REPORT,
@@ -909,7 +929,8 @@ static int magicmouse_probe(struct hid_device *hdev,
hid_err(hdev, "unable to request touch data (%d)\n", ret);
goto err_stop_hw;
}
- if (ret == -EIO && id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
+ if (ret == -EIO && (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC)) {
schedule_delayed_work(&msc->work, msecs_to_jiffies(500));
}
@@ -945,6 +966,7 @@ static const __u8 *magicmouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
*/
if (hdev->vendor == USB_VENDOR_ID_APPLE &&
(hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC ||
hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) &&
*rsize == 83 && rdesc[46] == 0x84 && rdesc[58] == 0x85) {
@@ -971,6 +993,10 @@ static const struct hid_device_id magic_mice[] = {
USB_DEVICE_ID_APPLE_MAGICMOUSE2), .driver_data = 0 },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_MAGICMOUSE2), .driver_data = 0 },
+ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC), .driver_data = 0 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC), .driver_data = 0 },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_MAGICTRACKPAD), .driver_data = 0 },
{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE,
diff --git a/drivers/hid/hid-mcp2200.c b/drivers/hid/hid-mcp2200.c
index bf57f7f6caa0..e6ea0a2140eb 100644
--- a/drivers/hid/hid-mcp2200.c
+++ b/drivers/hid/hid-mcp2200.c
@@ -127,8 +127,8 @@ static int mcp_cmd_read_all(struct mcp2200 *mcp)
return mcp->status;
}
-static void mcp_set_multiple(struct gpio_chip *gc, unsigned long *mask,
- unsigned long *bits)
+static int mcp_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
{
struct mcp2200 *mcp = gpiochip_get_data(gc);
u8 value;
@@ -150,16 +150,20 @@ static void mcp_set_multiple(struct gpio_chip *gc, unsigned long *mask,
if (status == sizeof(struct mcp_set_clear_outputs))
mcp->gpio_val = value;
+ else
+ status = -EIO;
mutex_unlock(&mcp->lock);
+
+ return status;
}
-static void mcp_set(struct gpio_chip *gc, unsigned int gpio_nr, int value)
+static int mcp_set(struct gpio_chip *gc, unsigned int gpio_nr, int value)
{
unsigned long mask = 1 << gpio_nr;
unsigned long bmap_value = value << gpio_nr;
- mcp_set_multiple(gc, &mask, &bmap_value);
+ return mcp_set_multiple(gc, &mask, &bmap_value);
}
static int mcp_get_multiple(struct gpio_chip *gc, unsigned long *mask,
@@ -263,9 +267,10 @@ static int mcp_direction_output(struct gpio_chip *gc, unsigned int gpio_nr,
bmap_value = value << gpio_nr;
ret = mcp_set_direction(gc, gpio_nr, MCP2200_DIR_OUT);
- if (!ret)
- mcp_set_multiple(gc, &mask, &bmap_value);
- return ret;
+ if (ret)
+ return ret;
+
+ return mcp_set_multiple(gc, &mask, &bmap_value);
}
static const struct gpio_chip template_chip = {
@@ -274,8 +279,8 @@ static const struct gpio_chip template_chip = {
.get_direction = mcp_get_direction,
.direction_input = mcp_direction_input,
.direction_output = mcp_direction_output,
- .set = mcp_set,
- .set_multiple = mcp_set_multiple,
+ .set_rv = mcp_set,
+ .set_multiple_rv = mcp_set_multiple,
.get = mcp_get,
.get_multiple = mcp_get_multiple,
.base = -1,
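
The .set_rv/.set_multiple_rv hooks are the GPIO core's newer setter contract:
same arguments as .set/.set_multiple, but returning 0 or a negative errno so
bus failures like the -EIO above reach the GPIO core instead of being
dropped. A sketch of the minimal shape (the demo_* names are invented):

	static int demo_gpio_set(struct gpio_chip *gc, unsigned int offset,
				 int value)
	{
		struct demo_chip *chip = gpiochip_get_data(gc);

		return demo_write_bit(chip, offset, value);	/* 0 or -errno */
	}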
diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c
index 0f93c22a479f..6c0ac14f11a6 100644
--- a/drivers/hid/hid-mcp2221.c
+++ b/drivers/hid/hid-mcp2221.c
@@ -624,10 +624,10 @@ static int mcp_gpio_get(struct gpio_chip *gc,
return ret;
}
-static void mcp_gpio_set(struct gpio_chip *gc,
- unsigned int offset, int value)
+static int mcp_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct mcp2221 *mcp = gpiochip_get_data(gc);
+ int ret;
memset(mcp->txbuf, 0, 18);
mcp->txbuf[0] = MCP2221_GPIO_SET;
@@ -638,8 +638,10 @@ static void mcp_gpio_set(struct gpio_chip *gc,
mcp->txbuf[mcp->gp_idx] = !!value;
mutex_lock(&mcp->lock);
- mcp_send_data_req_status(mcp, mcp->txbuf, 18);
+ ret = mcp_send_data_req_status(mcp, mcp->txbuf, 18);
mutex_unlock(&mcp->lock);
+
+ return ret;
}
static int mcp_gpio_dir_set(struct mcp2221 *mcp,
@@ -1206,7 +1208,7 @@ static int mcp2221_probe(struct hid_device *hdev,
mcp->gc->direction_input = mcp_gpio_direction_input;
mcp->gc->direction_output = mcp_gpio_direction_output;
mcp->gc->get_direction = mcp_gpio_get_direction;
- mcp->gc->set = mcp_gpio_set;
+ mcp->gc->set_rv = mcp_gpio_set;
mcp->gc->get = mcp_gpio_get;
mcp->gc->ngpio = MCP_NGPIO;
mcp->gc->base = -1;
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 7ac8e16e6158..ded0fef7d8c7 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1887,6 +1887,16 @@ static void mt_remove(struct hid_device *hdev)
hid_hw_stop(hdev);
}
+static void mt_on_hid_hw_open(struct hid_device *hdev)
+{
+ mt_set_modes(hdev, HID_LATENCY_NORMAL, TOUCHPAD_REPORT_ALL);
+}
+
+static void mt_on_hid_hw_close(struct hid_device *hdev)
+{
+ mt_set_modes(hdev, HID_LATENCY_HIGH, TOUCHPAD_REPORT_NONE);
+}
+
/*
* This list contains only:
* - VID/PID of products not working with the default multitouch handling
@@ -2354,5 +2364,7 @@ static struct hid_driver mt_driver = {
.suspend = pm_ptr(mt_suspend),
.reset_resume = pm_ptr(mt_reset_resume),
.resume = pm_ptr(mt_resume),
+ .on_hid_hw_open = mt_on_hid_hw_open,
+ .on_hid_hw_close = mt_on_hid_hw_close,
};
module_hid_driver(mt_driver);
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 0731473cc9b1..7fefeb413ec3 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -1063,7 +1063,7 @@ bool hid_ignore(struct hid_device *hdev)
}
if (hdev->type == HID_TYPE_USBMOUSE &&
- hid_match_id(hdev, hid_mouse_ignore_list))
+ hdev->quirks & HID_QUIRK_IGNORE_MOUSE)
return true;
return !!hid_match_id(hdev, hid_ignore_list);
@@ -1267,6 +1267,9 @@ static unsigned long hid_gets_squirk(const struct hid_device *hdev)
if (hid_match_id(hdev, hid_ignore_list))
quirks |= HID_QUIRK_IGNORE;
+ if (hid_match_id(hdev, hid_mouse_ignore_list))
+ quirks |= HID_QUIRK_IGNORE_MOUSE;
+
if (hid_match_id(hdev, hid_have_special_driver))
quirks |= HID_QUIRK_HAVE_SPECIAL_DRIVER;
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c b/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
index fa51155ebe39..8a8c4a46f927 100644
--- a/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
@@ -82,15 +82,10 @@ static int quicki2c_acpi_get_dsd_property(struct acpi_device *adev, acpi_string
{
acpi_handle handle = acpi_device_handle(adev);
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object obj = { .type = type };
- struct acpi_object_list arg_list = {
- .count = 1,
- .pointer = &obj,
- };
union acpi_object *ret_obj;
acpi_status status;
- status = acpi_evaluate_object(handle, dsd_method_name, &arg_list, &buffer);
+ status = acpi_evaluate_object(handle, dsd_method_name, NULL, &buffer);
if (ACPI_FAILURE(status)) {
acpi_handle_err(handle,
"Can't evaluate %s method: %d\n", dsd_method_name, status);
diff --git a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c
index 4fc78b5a04b5..c105df7f6c87 100644
--- a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c
+++ b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c
@@ -1121,7 +1121,7 @@ EXPORT_SYMBOL_NS_GPL(thc_port_select, "INTEL_THC");
static u8 thc_get_spi_freq_div_val(struct thc_device *dev, u32 spi_freq_val)
{
- int frequency[] = {
+ static const int frequency[] = {
THC_SPI_FREQUENCY_7M,
THC_SPI_FREQUENCY_15M,
THC_SPI_FREQUENCY_17M,
@@ -1130,7 +1130,7 @@ static u8 thc_get_spi_freq_div_val(struct thc_device *dev, u32 spi_freq_val)
THC_SPI_FREQUENCY_31M,
THC_SPI_FREQUENCY_41M,
};
- u8 frequency_div[] = {
+ static const u8 frequency_div[] = {
THC_SPI_FRQ_DIV_2,
THC_SPI_FRQ_DIV_1,
THC_SPI_FRQ_DIV_7,
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 7d9297fad90e..d4cbecc668ec 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -984,12 +984,11 @@ static int usbhid_parse(struct hid_device *hid)
struct usb_host_interface *interface = intf->cur_altsetting;
struct usb_device *dev = interface_to_usbdev (intf);
struct hid_descriptor *hdesc;
+ struct hid_class_descriptor *hcdesc;
u32 quirks = 0;
unsigned int rsize = 0;
char *rdesc;
- int ret, n;
- int num_descriptors;
- size_t offset = offsetof(struct hid_descriptor, desc);
+ int ret;
quirks = hid_lookup_quirk(hid);
@@ -1011,20 +1010,19 @@ static int usbhid_parse(struct hid_device *hid)
return -ENODEV;
}
- if (hdesc->bLength < sizeof(struct hid_descriptor)) {
- dbg_hid("hid descriptor is too short\n");
+ if (!hdesc->bNumDescriptors ||
+ hdesc->bLength != sizeof(*hdesc) +
+ (hdesc->bNumDescriptors - 1) * sizeof(*hcdesc)) {
+ dbg_hid("hid descriptor invalid, bLen=%hhu bNum=%hhu\n",
+ hdesc->bLength, hdesc->bNumDescriptors);
return -EINVAL;
}
hid->version = le16_to_cpu(hdesc->bcdHID);
hid->country = hdesc->bCountryCode;
- num_descriptors = min_t(int, hdesc->bNumDescriptors,
- (hdesc->bLength - offset) / sizeof(struct hid_class_descriptor));
-
- for (n = 0; n < num_descriptors; n++)
- if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT)
- rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength);
+ if (hdesc->rpt_desc.bDescriptorType == HID_DT_REPORT)
+ rsize = le16_to_cpu(hdesc->rpt_desc.wDescriptorLength);
if (!rsize || rsize > HID_MAX_DESCRIPTOR_SIZE) {
dbg_hid("weird size of report descriptor (%u)\n", rsize);
@@ -1052,6 +1050,11 @@ static int usbhid_parse(struct hid_device *hid)
goto err;
}
+ if (hdesc->bNumDescriptors > 1)
+ hid_warn(intf,
+ "%u unsupported optional hid class descriptors\n",
+ (int)(hdesc->bNumDescriptors - 1));
+
hid->quirks |= quirks;
return 0;
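
The reworked length check above enforces an exact-size invariant instead of a minimum: a HID descriptor must carry at least the mandatory report descriptor, and bLength must account for every optional class descriptor that follows it. A minimal standalone sketch of that invariant, with struct layouts modeled on USB HID 1.11 rather than the kernel's exact definitions:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative layouts modeled on USB HID 1.11; like the kernel's
 * struct hid_descriptor, the mandatory report descriptor is embedded
 * as rpt_desc, so sizeof(*hdesc) already covers bNumDescriptors == 1. */
struct hid_class_desc {
	uint8_t  bDescriptorType;
	uint16_t wDescriptorLength;
} __attribute__((packed));

struct hid_desc {
	uint8_t  bLength;
	uint8_t  bDescriptorType;
	uint16_t bcdHID;
	uint8_t  bCountryCode;
	uint8_t  bNumDescriptors;
	struct hid_class_desc rpt_desc;
} __attribute__((packed));

static bool hid_desc_len_valid(const struct hid_desc *hdesc)
{
	/* at least the report descriptor, and an exact total length */
	return hdesc->bNumDescriptors &&
	       hdesc->bLength == sizeof(*hdesc) +
			(hdesc->bNumDescriptors - 1) * sizeof(struct hid_class_desc);
}
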
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index 6c1416167bd2..1cd188b73b74 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -5,17 +5,18 @@ menu "Microsoft Hyper-V guest support"
config HYPERV
tristate "Microsoft Hyper-V client drivers"
depends on (X86 && X86_LOCAL_APIC && HYPERVISOR_GUEST) \
- || (ACPI && ARM64 && !CPU_BIG_ENDIAN)
+ || (ARM64 && !CPU_BIG_ENDIAN)
select PARAVIRT
select X86_HV_CALLBACK_VECTOR if X86
select OF_EARLY_FLATTREE if OF
+ select SYSFB if !HYPERV_VTL_MODE
help
Select this option to run Linux as a Hyper-V client operating
system.
config HYPERV_VTL_MODE
bool "Enable Linux to boot in VTL context"
- depends on X86_64 && HYPERV
+ depends on (X86_64 || ARM64) && HYPERV
depends on SMP
default n
help
@@ -31,7 +32,7 @@ config HYPERV_VTL_MODE
Select this option to build a Linux kernel to run at a VTL other than
the normal VTL0, which currently is only VTL2. This option
- initializes the x86 platform for VTL2, and adds the ability to boot
+ initializes the kernel to run in VTL2, and adds the ability to boot
secondary CPUs directly into 64-bit context as required for VTLs other
than 0. A kernel built with this option must run at VTL2, and will
not run as a normal guest.
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 8351360bba16..be490c598785 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -207,10 +207,19 @@ int vmbus_connect(void)
mutex_init(&vmbus_connection.channel_mutex);
/*
+ * The following Hyper-V interrupt and monitor pages can be used by
+ * UIO for mapping to user-space, so they should always be allocated on
+ * system page boundaries. The system page size must be >= the Hyper-V
+ * page size.
+ */
+ BUILD_BUG_ON(PAGE_SIZE < HV_HYP_PAGE_SIZE);
+
+ /*
* Setup the vmbus event connection for channel interrupt
* abstraction stuff
*/
- vmbus_connection.int_page = hv_alloc_hyperv_zeroed_page();
+ vmbus_connection.int_page =
+ (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
if (vmbus_connection.int_page == NULL) {
ret = -ENOMEM;
goto cleanup;
@@ -225,8 +234,8 @@ int vmbus_connect(void)
* Setup the monitor notification facility. The 1st page for
* parent->child and the 2nd page for child->parent
*/
- vmbus_connection.monitor_pages[0] = hv_alloc_hyperv_page();
- vmbus_connection.monitor_pages[1] = hv_alloc_hyperv_page();
+ vmbus_connection.monitor_pages[0] = (void *)__get_free_page(GFP_KERNEL);
+ vmbus_connection.monitor_pages[1] = (void *)__get_free_page(GFP_KERNEL);
if ((vmbus_connection.monitor_pages[0] == NULL) ||
(vmbus_connection.monitor_pages[1] == NULL)) {
ret = -ENOMEM;
@@ -342,21 +351,23 @@ void vmbus_disconnect(void)
destroy_workqueue(vmbus_connection.work_queue);
if (vmbus_connection.int_page) {
- hv_free_hyperv_page(vmbus_connection.int_page);
+ free_page((unsigned long)vmbus_connection.int_page);
vmbus_connection.int_page = NULL;
}
if (vmbus_connection.monitor_pages[0]) {
if (!set_memory_encrypted(
(unsigned long)vmbus_connection.monitor_pages[0], 1))
- hv_free_hyperv_page(vmbus_connection.monitor_pages[0]);
+ free_page((unsigned long)
+ vmbus_connection.monitor_pages[0]);
vmbus_connection.monitor_pages[0] = NULL;
}
if (vmbus_connection.monitor_pages[1]) {
if (!set_memory_encrypted(
(unsigned long)vmbus_connection.monitor_pages[1], 1))
- hv_free_hyperv_page(vmbus_connection.monitor_pages[1]);
+ free_page((unsigned long)
+ vmbus_connection.monitor_pages[1]);
vmbus_connection.monitor_pages[1] = NULL;
}
}
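
The allocator switch above is safe because of the new BUILD_BUG_ON: a full system page is always at least one Hyper-V page, and __get_free_page() returns it aligned on a system page boundary, which UIO needs when mapping these pages to user space. kmalloc(HV_HYP_PAGE_SIZE), by contrast, only guarantees alignment to the allocation size, which is no longer a system page boundary once PAGE_SIZE > HV_HYP_PAGE_SIZE. A hedged sketch of the resulting alloc/free pairing, with illustrative names:

/* Sketch only; names are illustrative. The page is both large enough
 * for a Hyper-V page (PAGE_SIZE >= HV_HYP_PAGE_SIZE) and aligned on a
 * system page boundary, so UIO can mmap it safely. */
static void *example_alloc_comm_page(void)
{
	return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
}

static void example_free_comm_page(void *page)
{
	free_page((unsigned long)page);
}
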
diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c
index 59792e00cecf..49898d10faff 100644
--- a/drivers/hv/hv_common.c
+++ b/drivers/hv/hv_common.c
@@ -105,45 +105,6 @@ void __init hv_common_free(void)
hv_synic_eventring_tail = NULL;
}
-/*
- * Functions for allocating and freeing memory with size and
- * alignment HV_HYP_PAGE_SIZE. These functions are needed because
- * the guest page size may not be the same as the Hyper-V page
- * size. We depend upon kmalloc() aligning power-of-two size
- * allocations to the allocation size boundary, so that the
- * allocated memory appears to Hyper-V as a page of the size
- * it expects.
- */
-
-void *hv_alloc_hyperv_page(void)
-{
- BUILD_BUG_ON(PAGE_SIZE < HV_HYP_PAGE_SIZE);
-
- if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
- return (void *)__get_free_page(GFP_KERNEL);
- else
- return kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
-}
-EXPORT_SYMBOL_GPL(hv_alloc_hyperv_page);
-
-void *hv_alloc_hyperv_zeroed_page(void)
-{
- if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
- return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
- else
- return kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
-}
-EXPORT_SYMBOL_GPL(hv_alloc_hyperv_zeroed_page);
-
-void hv_free_hyperv_page(void *addr)
-{
- if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
- free_page((unsigned long)addr);
- else
- kfree(addr);
-}
-EXPORT_SYMBOL_GPL(hv_free_hyperv_page);
-
static void *hv_panic_page;
/*
@@ -272,7 +233,7 @@ static void hv_kmsg_dump_unregister(void)
atomic_notifier_chain_unregister(&panic_notifier_list,
&hyperv_panic_report_block);
- hv_free_hyperv_page(hv_panic_page);
+ kfree(hv_panic_page);
hv_panic_page = NULL;
}
@@ -280,7 +241,7 @@ static void hv_kmsg_dump_register(void)
{
int ret;
- hv_panic_page = hv_alloc_hyperv_zeroed_page();
+ hv_panic_page = kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
if (!hv_panic_page) {
pr_err("Hyper-V: panic message page memory allocation failed\n");
return;
@@ -289,7 +250,7 @@ static void hv_kmsg_dump_register(void)
ret = kmsg_dump_register(&hv_kmsg_dumper);
if (ret) {
pr_err("Hyper-V: kmsg dump register error 0x%x\n", ret);
- hv_free_hyperv_page(hv_panic_page);
+ kfree(hv_panic_page);
hv_panic_page = NULL;
}
}
@@ -317,6 +278,37 @@ void __init hv_get_partition_id(void)
pr_err("Hyper-V: failed to get partition ID: %#x\n",
hv_result(status));
}
+#if IS_ENABLED(CONFIG_HYPERV_VTL_MODE)
+u8 __init get_vtl(void)
+{
+ u64 control = HV_HYPERCALL_REP_COMP_1 | HVCALL_GET_VP_REGISTERS;
+ struct hv_input_get_vp_registers *input;
+ struct hv_output_get_vp_registers *output;
+ unsigned long flags;
+ u64 ret;
+
+ local_irq_save(flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ output = *this_cpu_ptr(hyperv_pcpu_output_arg);
+
+ memset(input, 0, struct_size(input, names, 1));
+ input->partition_id = HV_PARTITION_ID_SELF;
+ input->vp_index = HV_VP_INDEX_SELF;
+ input->input_vtl.as_uint8 = 0;
+ input->names[0] = HV_REGISTER_VSM_VP_STATUS;
+
+ ret = hv_do_hypercall(control, input, output);
+ if (hv_result_success(ret)) {
+ ret = output->values[0].reg8 & HV_VTL_MASK;
+ } else {
+		pr_err("Failed to get VTL (error: %lld), exiting...\n", ret);
+ BUG();
+ }
+
+ local_irq_restore(flags);
+ return ret;
+}
+#endif
int __init hv_common_init(void)
{
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index d74adb5bba44..33b524b4eb5e 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -45,7 +45,8 @@ struct vmbus_dynid {
struct hv_vmbus_device_id id;
};
-static struct device *hv_dev;
+/* VMBus Root Device */
+static struct device *vmbus_root_device;
static int hyperv_cpuhp_online;
@@ -80,9 +81,15 @@ static struct resource *fb_mmio;
static struct resource *hyperv_mmio;
static DEFINE_MUTEX(hyperv_mmio_lock);
+struct device *hv_get_vmbus_root_device(void)
+{
+ return vmbus_root_device;
+}
+EXPORT_SYMBOL_GPL(hv_get_vmbus_root_device);
+
static int vmbus_exists(void)
{
- if (hv_dev == NULL)
+ if (vmbus_root_device == NULL)
return -ENODEV;
return 0;
@@ -707,7 +714,30 @@ static const struct hv_vmbus_device_id *hv_vmbus_get_id(const struct hv_driver *
return id;
}
-/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
+/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices
+ *
+ * This function can race with vmbus_device_register(). This function is
+ * typically running on a user thread in response to writing to the "new_id"
+ * sysfs entry for a driver. vmbus_device_register() is running on a
+ * workqueue thread in response to the Hyper-V host offering a device to the
+ * guest. This function calls driver_attach(), which looks for an existing
+ * device matching the new id, and attaches the driver to which the new id
+ * has been assigned. vmbus_device_register() calls device_register(), which
+ * looks for a driver that matches the device being registered. If both
+ * operations are running simultaneously, the device driver probe function runs
+ * on whichever thread establishes the linkage between the driver and device.
+ *
+ * In most cases, it doesn't matter which thread runs the driver probe
+ * function. But if vmbus_device_register() does not find a matching driver,
+ * it proceeds to create the "channels" subdirectory and numbered per-channel
+ * subdirectory in sysfs. While that multi-step creation is in progress, this
+ * function could run the driver probe function. If the probe function checks
+ * for, or operates on, entries in the "channels" subdirectory, including by
+ * calling hv_create_ring_sysfs(), the operation may or may not succeed
+ * depending on the race. The race can't create a kernel failure in VMBus
+ * or device subsystem code, but probe functions in VMBus drivers doing such
+ * operations must be prepared for the failure case.
+ */
static int vmbus_add_dynid(struct hv_driver *drv, guid_t *guid)
{
struct vmbus_dynid *dynid;
@@ -861,7 +891,7 @@ static int vmbus_dma_configure(struct device *child_device)
* On x86/x64 coherence is assumed and these calls have no effect.
*/
hv_setup_dma_ops(child_device,
- device_get_dma_attr(hv_dev) == DEV_DMA_COHERENT);
+ device_get_dma_attr(vmbus_root_device) == DEV_DMA_COHERENT);
return 0;
}
@@ -1921,7 +1951,8 @@ static const struct kobj_type vmbus_chan_ktype = {
* ring for userspace to use.
* Note: Race conditions can happen with userspace and it is not encouraged to create new
* use-cases for this. This was added to maintain backward compatibility, while solving
- * one of the race conditions in uio_hv_generic while creating sysfs.
+ * one of the race conditions in uio_hv_generic while creating sysfs. See comments with
+ * vmbus_add_dynid() and vmbus_device_register().
*
* Returns 0 on success or error code on failure.
*/
@@ -2037,7 +2068,7 @@ int vmbus_device_register(struct hv_device *child_device_obj)
&child_device_obj->channel->offermsg.offer.if_instance);
child_device_obj->device.bus = &hv_bus;
- child_device_obj->device.parent = hv_dev;
+ child_device_obj->device.parent = vmbus_root_device;
child_device_obj->device.release = vmbus_device_release;
child_device_obj->device.dma_parms = &child_device_obj->dma_parms;
@@ -2055,6 +2086,20 @@ int vmbus_device_register(struct hv_device *child_device_obj)
return ret;
}
+ /*
+ * If device_register() found a driver to assign to the device, the
+ * driver's probe function has already run at this point. If that
+ * probe function accesses or operates on the "channels" subdirectory
+ * in sysfs, those operations will have failed because the "channels"
+ * subdirectory doesn't exist until the code below runs. Or if the
+ * probe function creates a /dev entry, a user space program could
+ * find and open the /dev entry, and then create a race by accessing
+ * the "channels" subdirectory while the creation steps are in progress
+ * here. The race can't result in a kernel failure, but the user space
+ * program may get an error in accessing "channels" or its
+ * subdirectories. See also comments with vmbus_add_dynid() about a
+ * related race condition.
+ */
child_device_obj->channels_kset = kset_create_and_add("channels",
NULL, kobj);
if (!child_device_obj->channels_kset) {
@@ -2412,7 +2457,7 @@ static int vmbus_acpi_add(struct platform_device *pdev)
struct acpi_device *ancestor;
struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
- hv_dev = &device->dev;
+ vmbus_root_device = &device->dev;
/*
* Older versions of Hyper-V for ARM64 fail to include the _CCA
@@ -2465,6 +2510,31 @@ static int vmbus_acpi_add(struct platform_device *pdev)
}
#endif
+static int vmbus_set_irq(struct platform_device *pdev)
+{
+ struct irq_data *data;
+ int irq;
+ irq_hw_number_t hwirq;
+
+ irq = platform_get_irq(pdev, 0);
+ /* platform_get_irq() may not return 0. */
+ if (irq < 0)
+ return irq;
+
+ data = irq_get_irq_data(irq);
+ if (!data) {
+ pr_err("No interrupt data for VMBus virq %d\n", irq);
+ return -ENODEV;
+ }
+ hwirq = irqd_to_hwirq(data);
+
+ vmbus_irq = irq;
+ vmbus_interrupt = hwirq;
+ pr_debug("VMBus virq %d, hwirq %d\n", vmbus_irq, vmbus_interrupt);
+
+ return 0;
+}
+
static int vmbus_device_add(struct platform_device *pdev)
{
struct resource **cur_res = &hyperv_mmio;
@@ -2473,12 +2543,17 @@ static int vmbus_device_add(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
int ret;
- hv_dev = &pdev->dev;
+ vmbus_root_device = &pdev->dev;
ret = of_range_parser_init(&parser, np);
if (ret)
return ret;
+ if (!__is_defined(HYPERVISOR_CALLBACK_VECTOR))
+ ret = vmbus_set_irq(pdev);
+ if (ret)
+ return ret;
+
for_each_of_range(&parser, &range) {
struct resource *res;
@@ -2786,7 +2861,7 @@ static int __init hv_acpi_init(void)
if (ret)
return ret;
- if (!hv_dev) {
+ if (!vmbus_root_device) {
ret = -ENODEV;
goto cleanup;
}
@@ -2817,7 +2892,7 @@ static int __init hv_acpi_init(void)
cleanup:
platform_driver_unregister(&vmbus_platform_driver);
- hv_dev = NULL;
+ vmbus_root_device = NULL;
return ret;
}
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 5fd93aad2d6d..1b1d64493909 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -335,6 +335,26 @@ config SENSORS_K10TEMP
This driver can also be built as a module. If so, the module
will be called k10temp.
+config SENSORS_KBATT
+ tristate "KEBA battery controller support"
+ depends on KEBA_CP500
+ help
+ This driver supports the battery monitoring controller found in
+ KEBA system FPGA devices.
+
+ This driver can also be built as a module. If so, the module
+ will be called kbatt.
+
+config SENSORS_KFAN
+ tristate "KEBA fan controller support"
+ depends on KEBA_CP500
+ help
+ This driver supports the fan controller found in KEBA system
+ FPGA devices.
+
+ This driver can also be built as a module. If so, the module
+ will be called kfan.
+
config SENSORS_FAM15H_POWER
tristate "AMD Family 15h processor power"
depends on X86 && PCI && CPU_SUP_AMD
@@ -1308,6 +1328,15 @@ config SENSORS_MAX31790
This driver can also be built as a module. If so, the module
will be called max31790.
+config SENSORS_MAX77705
+ tristate "MAX77705 current and voltage sensor"
+ depends on MFD_MAX77705
+ help
+	  If you say yes here you get support for MAX77705 sensors connected via I2C.
+
+ This driver can also be built as a module. If so, the module
+ will be called max77705-hwmon.
+
config SENSORS_MC34VR500
tristate "NXP MC34VR500 hardware monitoring driver"
depends on I2C
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index e3468d024ff3..48e5866c0c9a 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -110,6 +110,8 @@ obj-$(CONFIG_SENSORS_IT87) += it87.o
obj-$(CONFIG_SENSORS_JC42) += jc42.o
obj-$(CONFIG_SENSORS_K8TEMP) += k8temp.o
obj-$(CONFIG_SENSORS_K10TEMP) += k10temp.o
+obj-$(CONFIG_SENSORS_KBATT) += kbatt.o
+obj-$(CONFIG_SENSORS_KFAN) += kfan.o
obj-$(CONFIG_SENSORS_LAN966X) += lan966x-hwmon.o
obj-$(CONFIG_SENSORS_LENOVO_EC) += lenovo-ec-sensors.o
obj-$(CONFIG_SENSORS_LINEAGE) += lineage-pem.o
@@ -161,6 +163,7 @@ obj-$(CONFIG_SENSORS_MAX6650) += max6650.o
obj-$(CONFIG_SENSORS_MAX6697) += max6697.o
obj-$(CONFIG_SENSORS_MAX31790) += max31790.o
obj-$(CONFIG_MAX31827) += max31827.o
+obj-$(CONFIG_SENSORS_MAX77705) += max77705-hwmon.o
obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o
obj-$(CONFIG_SENSORS_MC34VR500) += mc34vr500.o
obj-$(CONFIG_SENSORS_MCP3021) += mcp3021.o
diff --git a/drivers/hwmon/aht10.c b/drivers/hwmon/aht10.c
index 312ef3e98754..d1c55e2eb479 100644
--- a/drivers/hwmon/aht10.c
+++ b/drivers/hwmon/aht10.c
@@ -94,7 +94,7 @@ struct aht10_data {
unsigned int meas_size;
};
-/**
+/*
* aht10_init() - Initialize an AHT10/AHT20 chip
* @data: the data associated with this AHT10/AHT20 chip
* Return: 0 if successful, 1 if not
@@ -124,7 +124,7 @@ static int aht10_init(struct aht10_data *data)
return 0;
}
-/**
+/*
* aht10_polltime_expired() - check if the minimum poll interval has
* expired
* @data: the data containing the time to compare
@@ -140,7 +140,7 @@ static int aht10_polltime_expired(struct aht10_data *data)
DECLARE_CRC8_TABLE(crc8_table);
-/**
+/*
* crc8_check() - check crc of the sensor's measurements
* @raw_data: data frame received from sensor(including crc as the last byte)
* @count: size of the data frame
@@ -155,7 +155,7 @@ static int crc8_check(u8 *raw_data, int count)
return crc8(crc8_table, raw_data, count, CRC8_INIT_VALUE);
}
-/**
+/*
* aht10_read_values() - read and parse the raw data from the AHT10/AHT20
* @data: the struct aht10_data to use for the lock
* Return: 0 if successful, 1 if not
@@ -214,7 +214,7 @@ static int aht10_read_values(struct aht10_data *data)
return 0;
}
-/**
+/*
* aht10_interval_write() - store the given minimum poll interval.
* Return: 0 on success, -EINVAL if a value lower than the
* AHT10_MIN_POLL_INTERVAL is given
@@ -226,7 +226,7 @@ static ssize_t aht10_interval_write(struct aht10_data *data,
return 0;
}
-/**
+/*
* aht10_interval_read() - read the minimum poll interval
* in milliseconds
*/
@@ -237,7 +237,7 @@ static ssize_t aht10_interval_read(struct aht10_data *data,
return 0;
}
-/**
+/*
* aht10_temperature1_read() - read the temperature in millidegrees
*/
static int aht10_temperature1_read(struct aht10_data *data, long *val)
@@ -252,7 +252,7 @@ static int aht10_temperature1_read(struct aht10_data *data, long *val)
return 0;
}
-/**
+/*
* aht10_humidity1_read() - read the relative humidity in millipercent
*/
static int aht10_humidity1_read(struct aht10_data *data, long *val)
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
index 1e3c6acd8974..13a789cc85d2 100644
--- a/drivers/hwmon/amc6821.c
+++ b/drivers/hwmon/amc6821.c
@@ -23,9 +23,12 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_platform.h>
+#include <linux/pwm.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+#include <dt-bindings/pwm/pwm.h>
+
/*
* Addresses to scan.
*/
@@ -37,7 +40,7 @@ static const unsigned short normal_i2c[] = {0x18, 0x19, 0x1a, 0x2c, 0x2d, 0x2e,
* Insmod parameters
*/
-static int pwminv; /*Inverted PWM output. */
+static int pwminv = -1;	/* Inverted PWM output; -1 = take polarity from device description */
module_param(pwminv, int, 0444);
static int init = 1; /*Power-on initialization.*/
@@ -845,9 +848,43 @@ static int amc6821_detect(struct i2c_client *client, struct i2c_board_info *info
return 0;
}
-static int amc6821_init_client(struct amc6821_data *data)
+static enum pwm_polarity amc6821_pwm_polarity(struct i2c_client *client)
+{
+ enum pwm_polarity polarity = PWM_POLARITY_NORMAL;
+ struct of_phandle_args args;
+ struct device_node *fan_np;
+
+ /*
+ * For backward compatibility, the pwminv module parameter takes
+	 * For backward compatibility, the pwminv module parameter always
+	 * takes precedence over any other device description
+ if (pwminv == 0)
+ return PWM_POLARITY_NORMAL;
+ if (pwminv > 0)
+ return PWM_POLARITY_INVERSED;
+
+ fan_np = of_get_child_by_name(client->dev.of_node, "fan");
+ if (!fan_np)
+ return PWM_POLARITY_NORMAL;
+
+ if (of_parse_phandle_with_args(fan_np, "pwms", "#pwm-cells", 0, &args))
+ goto out;
+ of_node_put(args.np);
+
+ if (args.args_count != 2)
+ goto out;
+
+ if (args.args[1] & PWM_POLARITY_INVERTED)
+ polarity = PWM_POLARITY_INVERSED;
+out:
+ of_node_put(fan_np);
+ return polarity;
+}
+
+static int amc6821_init_client(struct i2c_client *client, struct amc6821_data *data)
{
struct regmap *regmap = data->regmap;
+ u32 regval;
int err;
if (init) {
@@ -864,11 +901,14 @@ static int amc6821_init_client(struct amc6821_data *data)
if (err)
return err;
+ regval = AMC6821_CONF1_START;
+ if (amc6821_pwm_polarity(client) == PWM_POLARITY_INVERSED)
+ regval |= AMC6821_CONF1_PWMINV;
+
err = regmap_update_bits(regmap, AMC6821_REG_CONF1,
AMC6821_CONF1_THERMOVIE | AMC6821_CONF1_FANIE |
AMC6821_CONF1_START | AMC6821_CONF1_PWMINV,
- AMC6821_CONF1_START |
- (pwminv ? AMC6821_CONF1_PWMINV : 0));
+ regval);
if (err)
return err;
}
@@ -916,7 +956,7 @@ static int amc6821_probe(struct i2c_client *client)
"Failed to initialize regmap\n");
data->regmap = regmap;
- err = amc6821_init_client(data);
+ err = amc6821_init_client(client, data);
if (err)
return err;
diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c
index 006ced5ab6e6..e0a95197c71b 100644
--- a/drivers/hwmon/asus-ec-sensors.c
+++ b/drivers/hwmon/asus-ec-sensors.c
@@ -169,7 +169,11 @@ enum board_family {
family_intel_600_series
};
-/* All the known sensors for ASUS EC controllers */
+/*
+ * All the known sensors for ASUS EC controllers. These arrays have to be sorted
+ * by the full ((bank << 8) + index) register index (see asus_ec_block_read() as
+ * to why).
+ */
static const struct ec_sensor_info sensors_family_amd_400[] = {
[ec_sensor_temp_chipset] =
EC_SENSOR("Chipset", hwmon_temp, 1, 0x00, 0x3a),
@@ -183,10 +187,10 @@ static const struct ec_sensor_info sensors_family_amd_400[] = {
EC_SENSOR("VRM", hwmon_temp, 1, 0x00, 0x3e),
[ec_sensor_in_cpu_core] =
EC_SENSOR("CPU Core", hwmon_in, 2, 0x00, 0xa2),
- [ec_sensor_fan_cpu_opt] =
- EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xbc),
[ec_sensor_fan_vrm_hs] =
EC_SENSOR("VRM HS", hwmon_fan, 2, 0x00, 0xb2),
+ [ec_sensor_fan_cpu_opt] =
+ EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xbc),
[ec_sensor_fan_chipset] =
/* no chipset fans in this generation */
EC_SENSOR("Chipset", hwmon_fan, 0, 0x00, 0x00),
@@ -194,10 +198,10 @@ static const struct ec_sensor_info sensors_family_amd_400[] = {
EC_SENSOR("Water_Flow", hwmon_fan, 2, 0x00, 0xb4),
[ec_sensor_curr_cpu] =
EC_SENSOR("CPU", hwmon_curr, 1, 0x00, 0xf4),
- [ec_sensor_temp_water_in] =
- EC_SENSOR("Water_In", hwmon_temp, 1, 0x01, 0x0d),
[ec_sensor_temp_water_out] =
EC_SENSOR("Water_Out", hwmon_temp, 1, 0x01, 0x0b),
+ [ec_sensor_temp_water_in] =
+ EC_SENSOR("Water_In", hwmon_temp, 1, 0x01, 0x0d),
};
static const struct ec_sensor_info sensors_family_amd_500[] = {
@@ -239,19 +243,20 @@ static const struct ec_sensor_info sensors_family_amd_500[] = {
static const struct ec_sensor_info sensors_family_amd_600[] = {
[ec_sensor_temp_cpu] = EC_SENSOR("CPU", hwmon_temp, 1, 0x00, 0x30),
- [ec_sensor_temp_cpu_package] = EC_SENSOR("CPU Package", hwmon_temp, 1, 0x00, 0x31),
+ [ec_sensor_temp_cpu_package] =
+ EC_SENSOR("CPU Package", hwmon_temp, 1, 0x00, 0x31),
[ec_sensor_temp_mb] =
EC_SENSOR("Motherboard", hwmon_temp, 1, 0x00, 0x32),
[ec_sensor_temp_vrm] =
EC_SENSOR("VRM", hwmon_temp, 1, 0x00, 0x33),
[ec_sensor_temp_t_sensor] =
EC_SENSOR("T_Sensor", hwmon_temp, 1, 0x00, 0x36),
+ [ec_sensor_fan_cpu_opt] =
+ EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xb0),
[ec_sensor_temp_water_in] =
EC_SENSOR("Water_In", hwmon_temp, 1, 0x01, 0x00),
[ec_sensor_temp_water_out] =
EC_SENSOR("Water_Out", hwmon_temp, 1, 0x01, 0x01),
- [ec_sensor_fan_cpu_opt] =
- EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xb0),
};
static const struct ec_sensor_info sensors_family_intel_300[] = {
@@ -278,6 +283,14 @@ static const struct ec_sensor_info sensors_family_intel_600[] = {
[ec_sensor_temp_t_sensor] =
EC_SENSOR("T_Sensor", hwmon_temp, 1, 0x00, 0x3d),
[ec_sensor_temp_vrm] = EC_SENSOR("VRM", hwmon_temp, 1, 0x00, 0x3e),
+ [ec_sensor_fan_water_flow] =
+ EC_SENSOR("Water_Flow", hwmon_fan, 2, 0x00, 0xbe),
+ [ec_sensor_temp_water_in] =
+ EC_SENSOR("Water_In", hwmon_temp, 1, 0x01, 0x00),
+ [ec_sensor_temp_water_out] =
+ EC_SENSOR("Water_Out", hwmon_temp, 1, 0x01, 0x01),
+ [ec_sensor_temp_water_block_in] =
+ EC_SENSOR("Water_Block_In", hwmon_temp, 1, 0x01, 0x02),
};
/* Shortcuts for common combinations */
@@ -300,6 +313,15 @@ struct ec_board_info {
enum board_family family;
};
+static const struct ec_board_info board_info_maximus_vi_hero = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW,
+ .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
+ .family = family_intel_300_series,
+};
+
static const struct ec_board_info board_info_prime_x470_pro = {
.sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
@@ -402,6 +424,13 @@ static const struct ec_board_info board_info_maximus_xi_hero = {
.family = family_intel_300_series,
};
+static const struct ec_board_info board_info_maximus_z690_formula = {
+ .sensors = SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+ SENSOR_SET_TEMP_WATER | SENSOR_FAN_WATER_FLOW,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_RMTW_ASMX,
+ .family = family_intel_600_series,
+};
+
static const struct ec_board_info board_info_crosshair_viii_impact = {
.sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
@@ -507,6 +536,8 @@ static const struct ec_board_info board_info_tuf_gaming_x670e_plus = {
}
static const struct dmi_system_id dmi_table[] = {
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("MAXIMUS VI HERO",
+ &board_info_maximus_vi_hero),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X470-PRO",
&board_info_prime_x470_pro),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X570-PRO",
@@ -537,6 +568,8 @@ static const struct dmi_system_id dmi_table[] = {
&board_info_maximus_xi_hero),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG MAXIMUS XI HERO (WI-FI)",
&board_info_maximus_xi_hero),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG MAXIMUS Z690 FORMULA",
+ &board_info_maximus_z690_formula),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII IMPACT",
&board_info_crosshair_viii_impact),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B550-E GAMING",
@@ -933,6 +966,10 @@ static int asus_ec_hwmon_read_string(struct device *dev,
{
struct ec_sensors_data *state = dev_get_drvdata(dev);
int sensor_index = find_ec_sensor_index(state, type, channel);
+
+ if (sensor_index < 0)
+ return sensor_index;
+
*str = get_sensor_info(state, sensor_index)->label;
return 0;
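
The entry reorderings above follow the sorting rule stated in the new comment: sensors are keyed by the full ((bank << 8) + index) register address, so Water_Out at bank 0x01, index 0x0b now precedes Water_In at bank 0x01, index 0x0d. A standalone sketch of the key:

#include <stdint.h>
#include <stdio.h>

/* The sort key from the comment above: bank in the high byte,
 * register index in the low byte. */
static uint16_t ec_sort_key(uint8_t bank, uint8_t index)
{
	return ((uint16_t)bank << 8) + index;
}

int main(void)
{
	/* Water_Out (0x01, 0x0b) sorts before Water_In (0x01, 0x0d) */
	printf("0x%04x < 0x%04x\n", ec_sort_key(1, 0x0b), ec_sort_key(1, 0x0d));
	return 0;
}
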
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 79e5606e6d2f..1e2c8e284001 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -1274,6 +1274,13 @@ static const struct dmi_system_id i8k_dmi_table[] __initconst = {
},
},
{
+ .ident = "Dell OptiPlex 7050",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "OptiPlex 7050"),
+ },
+ },
+ {
.ident = "Dell Precision",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index b779240328d5..516c34bb61c9 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -20,6 +20,9 @@
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
#include <linux/thermal.h>
struct gpio_fan_speed {
@@ -42,6 +45,7 @@ struct gpio_fan_data {
bool pwm_enable;
struct gpio_desc *alarm_gpio;
struct work_struct alarm_work;
+ struct regulator *supply;
};
/*
@@ -125,13 +129,32 @@ static int __get_fan_ctrl(struct gpio_fan_data *fan_data)
}
/* Must be called with fan_data->lock held, except during initialization. */
-static void set_fan_speed(struct gpio_fan_data *fan_data, int speed_index)
+static int set_fan_speed(struct gpio_fan_data *fan_data, int speed_index)
{
if (fan_data->speed_index == speed_index)
- return;
+ return 0;
+
+ if (fan_data->speed_index == 0 && speed_index > 0) {
+ int ret;
+
+ ret = pm_runtime_resume_and_get(fan_data->dev);
+ if (ret < 0)
+ return ret;
+ }
__set_fan_ctrl(fan_data, fan_data->speed[speed_index].ctrl_val);
+
+ if (fan_data->speed_index > 0 && speed_index == 0) {
+ int ret;
+
+ ret = pm_runtime_put_sync(fan_data->dev);
+ if (ret < 0)
+ return ret;
+ }
+
fan_data->speed_index = speed_index;
+
+ return 0;
}
static int get_fan_speed_index(struct gpio_fan_data *fan_data)
@@ -176,7 +199,7 @@ static ssize_t pwm1_store(struct device *dev, struct device_attribute *attr,
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
unsigned long pwm;
int speed_index;
- int ret = count;
+ int ret;
if (kstrtoul(buf, 10, &pwm) || pwm > 255)
return -EINVAL;
@@ -189,12 +212,12 @@ static ssize_t pwm1_store(struct device *dev, struct device_attribute *attr,
}
speed_index = DIV_ROUND_UP(pwm * (fan_data->num_speed - 1), 255);
- set_fan_speed(fan_data, speed_index);
+ ret = set_fan_speed(fan_data, speed_index);
exit_unlock:
mutex_unlock(&fan_data->lock);
- return ret;
+ return ret ? ret : count;
}
static ssize_t pwm1_enable_show(struct device *dev,
@@ -211,6 +234,7 @@ static ssize_t pwm1_enable_store(struct device *dev,
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
unsigned long val;
+ int ret = 0;
if (kstrtoul(buf, 10, &val) || val > 1)
return -EINVAL;
@@ -224,11 +248,11 @@ static ssize_t pwm1_enable_store(struct device *dev,
/* Disable manual control mode: set fan at full speed. */
if (val == 0)
- set_fan_speed(fan_data, fan_data->num_speed - 1);
+ ret = set_fan_speed(fan_data, fan_data->num_speed - 1);
mutex_unlock(&fan_data->lock);
- return count;
+ return ret ? ret : count;
}
static ssize_t pwm1_mode_show(struct device *dev,
@@ -279,7 +303,7 @@ static ssize_t set_rpm(struct device *dev, struct device_attribute *attr,
goto exit_unlock;
}
- set_fan_speed(fan_data, rpm_to_speed_index(fan_data, rpm));
+ ret = set_fan_speed(fan_data, rpm_to_speed_index(fan_data, rpm));
exit_unlock:
mutex_unlock(&fan_data->lock);
@@ -386,6 +410,7 @@ static int gpio_fan_set_cur_state(struct thermal_cooling_device *cdev,
unsigned long state)
{
struct gpio_fan_data *fan_data = cdev->devdata;
+ int ret;
if (!fan_data)
return -EINVAL;
@@ -395,11 +420,11 @@ static int gpio_fan_set_cur_state(struct thermal_cooling_device *cdev,
mutex_lock(&fan_data->lock);
- set_fan_speed(fan_data, state);
+ ret = set_fan_speed(fan_data, state);
mutex_unlock(&fan_data->lock);
- return 0;
+ return ret;
}
static const struct thermal_cooling_device_ops gpio_fan_cool_ops = {
@@ -499,6 +524,8 @@ static void gpio_fan_stop(void *data)
mutex_lock(&fan_data->lock);
set_fan_speed(data, 0);
mutex_unlock(&fan_data->lock);
+
+ pm_runtime_disable(fan_data->dev);
}
static int gpio_fan_probe(struct platform_device *pdev)
@@ -521,6 +548,11 @@ static int gpio_fan_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fan_data);
mutex_init(&fan_data->lock);
+ fan_data->supply = devm_regulator_get(dev, "fan");
+ if (IS_ERR(fan_data->supply))
+ return dev_err_probe(dev, PTR_ERR(fan_data->supply),
+ "Failed to get fan-supply");
+
/* Configure control GPIOs if available. */
if (fan_data->gpios && fan_data->num_gpios > 0) {
if (!fan_data->speed || fan_data->num_speed <= 1)
@@ -548,6 +580,17 @@ static int gpio_fan_probe(struct platform_device *pdev)
return err;
}
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ /* If current GPIO state is active, mark RPM as active as well */
+	/* If the current GPIO state has the fan running, mark runtime PM active too */
+ int ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+ }
+
/* Optional cooling device register for Device tree platforms */
fan_data->cdev = devm_thermal_of_cooling_device_register(dev, np,
"gpio-fan", fan_data, &gpio_fan_cool_ops);
@@ -565,41 +608,69 @@ static void gpio_fan_shutdown(struct platform_device *pdev)
set_fan_speed(fan_data, 0);
}
+static int gpio_fan_runtime_suspend(struct device *dev)
+{
+ struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (fan_data->supply)
+ ret = regulator_disable(fan_data->supply);
+
+ return ret;
+}
+
+static int gpio_fan_runtime_resume(struct device *dev)
+{
+ struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (fan_data->supply)
+ ret = regulator_enable(fan_data->supply);
+
+ return ret;
+}
+
static int gpio_fan_suspend(struct device *dev)
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
+ int ret = 0;
if (fan_data->gpios) {
fan_data->resume_speed = fan_data->speed_index;
mutex_lock(&fan_data->lock);
- set_fan_speed(fan_data, 0);
+ ret = set_fan_speed(fan_data, 0);
mutex_unlock(&fan_data->lock);
}
- return 0;
+ return ret;
}
static int gpio_fan_resume(struct device *dev)
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
+ int ret = 0;
if (fan_data->gpios) {
mutex_lock(&fan_data->lock);
- set_fan_speed(fan_data, fan_data->resume_speed);
+ ret = set_fan_speed(fan_data, fan_data->resume_speed);
mutex_unlock(&fan_data->lock);
}
- return 0;
+ return ret;
}
-static DEFINE_SIMPLE_DEV_PM_OPS(gpio_fan_pm, gpio_fan_suspend, gpio_fan_resume);
+static const struct dev_pm_ops gpio_fan_pm = {
+ RUNTIME_PM_OPS(gpio_fan_runtime_suspend,
+ gpio_fan_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(gpio_fan_suspend, gpio_fan_resume)
+};
static struct platform_driver gpio_fan_driver = {
.probe = gpio_fan_probe,
.shutdown = gpio_fan_shutdown,
.driver = {
.name = "gpio-fan",
- .pm = pm_sleep_ptr(&gpio_fan_pm),
+ .pm = pm_ptr(&gpio_fan_pm),
.of_match_table = of_gpio_fan_match,
},
};
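
The runtime-PM changes above hinge on reference pairing: set_fan_speed() takes a reference on the off-to-running transition and drops it on the running-to-off transition, so the regulator handled by the new runtime callbacks stays enabled exactly while the fan spins. A hedged sketch of that pairing in isolation (names illustrative, error handling trimmed):

/* Sketch of the transition pairing, not the driver's exact code. The
 * usage count rises on the first non-zero speed and falls when the fan
 * stops, so runtime suspend (which disables the fan supply) can only
 * happen while the fan is off. */
static int example_set_speed(struct device *dev, int old_idx, int new_idx)
{
	int ret;

	if (old_idx == 0 && new_idx > 0) {	/* off -> on: take a PM ref */
		ret = pm_runtime_resume_and_get(dev);
		if (ret < 0)
			return ret;
	}

	/* ... program the speed-control GPIOs here ... */

	if (old_idx > 0 && new_idx == 0)	/* on -> off: drop the ref */
		pm_runtime_put_sync(dev);

	return 0;
}
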
diff --git a/drivers/hwmon/ina238.c b/drivers/hwmon/ina238.c
index 2d9f12f68d50..a4a41742786b 100644
--- a/drivers/hwmon/ina238.c
+++ b/drivers/hwmon/ina238.c
@@ -21,11 +21,14 @@
#define INA238_CONFIG 0x0
#define INA238_ADC_CONFIG 0x1
#define INA238_SHUNT_CALIBRATION 0x2
+#define SQ52206_SHUNT_TEMPCO 0x3
#define INA238_SHUNT_VOLTAGE 0x4
#define INA238_BUS_VOLTAGE 0x5
#define INA238_DIE_TEMP 0x6
#define INA238_CURRENT 0x7
#define INA238_POWER 0x8
+#define SQ52206_ENERGY 0x9
+#define SQ52206_CHARGE 0xa
#define INA238_DIAG_ALERT 0xb
#define INA238_SHUNT_OVER_VOLTAGE 0xc
#define INA238_SHUNT_UNDER_VOLTAGE 0xd
@@ -33,9 +36,12 @@
#define INA238_BUS_UNDER_VOLTAGE 0xf
#define INA238_TEMP_LIMIT 0x10
#define INA238_POWER_LIMIT 0x11
+#define SQ52206_POWER_PEAK 0x20
#define INA238_DEVICE_ID 0x3f /* not available on INA237 */
#define INA238_CONFIG_ADCRANGE BIT(4)
+#define SQ52206_CONFIG_ADCRANGE_HIGH BIT(4)
+#define SQ52206_CONFIG_ADCRANGE_LOW BIT(3)
#define INA238_DIAG_ALERT_TMPOL BIT(7)
#define INA238_DIAG_ALERT_SHNTOL BIT(6)
@@ -44,12 +50,13 @@
#define INA238_DIAG_ALERT_BUSUL BIT(3)
#define INA238_DIAG_ALERT_POL BIT(2)
-#define INA238_REGISTERS 0x11
+#define INA238_REGISTERS 0x20
#define INA238_RSHUNT_DEFAULT 10000 /* uOhm */
/* Default configuration of device on reset. */
#define INA238_CONFIG_DEFAULT 0
+#define SQ52206_CONFIG_DEFAULT 0x0005
/* 16 sample averaging, 1052us conversion time, continuous mode */
#define INA238_ADC_CONFIG_DEFAULT 0xfb6a
/* Configure alerts to be based on averaged value (SLOWALERT) */
@@ -87,14 +94,19 @@
* shunt = 0x4000 / (819.2 * 10^6) / 0.001 = 20000 uOhms (with 1mA/lsb)
*
* Current (mA) = register value * 20000 / rshunt / 4 * gain
- * Power (W) = 0.2 * register value * 20000 / rshunt / 4 * gain
+ * Power (mW) = 0.2 * register value * 20000 / rshunt / 4 * gain
+ * Specific to the SQ52206:
+ * Power (mW) = 0.24 * register value * 20000 / rshunt / 4 * gain
+ * Energy (mJ) = 16 * 0.24 * register value * 20000 / rshunt / 4 * gain
*/
#define INA238_CALIBRATION_VALUE 16384
#define INA238_FIXED_SHUNT 20000
#define INA238_SHUNT_VOLTAGE_LSB 5 /* 5 uV/lsb */
#define INA238_BUS_VOLTAGE_LSB 3125 /* 3.125 mV/lsb */
-#define INA238_DIE_TEMP_LSB 125 /* 125 mC/lsb */
+#define INA238_DIE_TEMP_LSB 1250000 /* 125.0000 mC/lsb */
+#define SQ52206_BUS_VOLTAGE_LSB 3750 /* 3.75 mV/lsb */
+#define SQ52206_DIE_TEMP_LSB 78125 /* 7.8125 mC/lsb */
static const struct regmap_config ina238_regmap_config = {
.max_register = INA238_REGISTERS,
@@ -102,7 +114,20 @@ static const struct regmap_config ina238_regmap_config = {
.val_bits = 16,
};
+enum ina238_ids { ina238, ina237, sq52206 };
+
+struct ina238_config {
+	bool has_power_highest;		/* chip reports peak power */
+	bool has_energy;		/* chip reports accumulated energy */
+	u8 temp_shift;			/* right shift applied to the temperature register */
+	u32 power_calculate_factor;	/* scaling factor for power calculation */
+	u16 config_default;		/* power-on default CONFIG value */
+	int bus_voltage_lsb;		/* bus voltage LSB, uV/lsb */
+	int temp_lsb;			/* die temperature LSB, in mC scaled by 10000 */
+};
+
struct ina238_data {
+ const struct ina238_config *config;
struct i2c_client *client;
struct mutex config_lock;
struct regmap *regmap;
@@ -110,6 +135,36 @@ struct ina238_data {
int gain;
};
+static const struct ina238_config ina238_config[] = {
+ [ina238] = {
+ .has_energy = false,
+ .has_power_highest = false,
+ .temp_shift = 4,
+ .power_calculate_factor = 20,
+ .config_default = INA238_CONFIG_DEFAULT,
+ .bus_voltage_lsb = INA238_BUS_VOLTAGE_LSB,
+ .temp_lsb = INA238_DIE_TEMP_LSB,
+ },
+ [ina237] = {
+ .has_energy = false,
+ .has_power_highest = false,
+ .temp_shift = 4,
+ .power_calculate_factor = 20,
+ .config_default = INA238_CONFIG_DEFAULT,
+ .bus_voltage_lsb = INA238_BUS_VOLTAGE_LSB,
+ .temp_lsb = INA238_DIE_TEMP_LSB,
+ },
+ [sq52206] = {
+ .has_energy = true,
+ .has_power_highest = true,
+ .temp_shift = 0,
+ .power_calculate_factor = 24,
+ .config_default = SQ52206_CONFIG_DEFAULT,
+ .bus_voltage_lsb = SQ52206_BUS_VOLTAGE_LSB,
+ .temp_lsb = SQ52206_DIE_TEMP_LSB,
+ },
+};
+
static int ina238_read_reg24(const struct i2c_client *client, u8 reg, u32 *val)
{
u8 data[3];
@@ -126,6 +181,24 @@ static int ina238_read_reg24(const struct i2c_client *client, u8 reg, u32 *val)
return 0;
}
+static int ina238_read_reg40(const struct i2c_client *client, u8 reg, u64 *val)
+{
+ u8 data[5];
+ u32 low;
+ int err;
+
+ /* 40-bit register read */
+ err = i2c_smbus_read_i2c_block_data(client, reg, 5, data);
+ if (err < 0)
+ return err;
+ if (err != 5)
+ return -EIO;
+ low = (data[1] << 24) | (data[2] << 16) | (data[3] << 8) | data[4];
+ *val = ((long long)data[0] << 32) | low;
+
+ return 0;
+}
+
static int ina238_read_in(struct device *dev, u32 attr, int channel,
long *val)
{
@@ -197,10 +270,10 @@ static int ina238_read_in(struct device *dev, u32 attr, int channel,
regval = (s16)regval;
if (channel == 0)
/* gain of 1 -> LSB / 4 */
- *val = (regval * INA238_SHUNT_VOLTAGE_LSB) /
- (1000 * (4 - data->gain + 1));
+ *val = (regval * INA238_SHUNT_VOLTAGE_LSB) *
+ data->gain / (1000 * 4);
else
- *val = (regval * INA238_BUS_VOLTAGE_LSB) / 1000;
+ *val = (regval * data->config->bus_voltage_lsb) / 1000;
break;
case hwmon_in_max_alarm:
case hwmon_in_min_alarm:
@@ -225,8 +298,8 @@ static int ina238_write_in(struct device *dev, u32 attr, int channel,
case 0:
/* signed value, clamp to max range +/-163 mV */
regval = clamp_val(val, -163, 163);
- regval = (regval * 1000 * (4 - data->gain + 1)) /
- INA238_SHUNT_VOLTAGE_LSB;
+ regval = (regval * 1000 * 4) /
+ (INA238_SHUNT_VOLTAGE_LSB * data->gain);
regval = clamp_val(regval, S16_MIN, S16_MAX);
switch (attr) {
@@ -242,7 +315,7 @@ static int ina238_write_in(struct device *dev, u32 attr, int channel,
case 1:
/* signed value, positive values only. Clamp to max 102.396 V */
regval = clamp_val(val, 0, 102396);
- regval = (regval * 1000) / INA238_BUS_VOLTAGE_LSB;
+ regval = (regval * 1000) / data->config->bus_voltage_lsb;
regval = clamp_val(regval, 0, S16_MAX);
switch (attr) {
@@ -297,8 +370,19 @@ static int ina238_read_power(struct device *dev, u32 attr, long *val)
return err;
/* Fixed 1mA lsb, scaled by 1000000 to have result in uW */
- power = div_u64(regval * 1000ULL * INA238_FIXED_SHUNT *
- data->gain, 20 * data->rshunt);
+ power = div_u64(regval * 1000ULL * INA238_FIXED_SHUNT * data->gain *
+ data->config->power_calculate_factor, 4 * 100 * data->rshunt);
+ /* Clamp value to maximum value of long */
+ *val = clamp_val(power, 0, LONG_MAX);
+ break;
+ case hwmon_power_input_highest:
+ err = ina238_read_reg24(data->client, SQ52206_POWER_PEAK, &regval);
+ if (err)
+ return err;
+
+ /* Fixed 1mA lsb, scaled by 1000000 to have result in uW */
+ power = div_u64(regval * 1000ULL * INA238_FIXED_SHUNT * data->gain *
+ data->config->power_calculate_factor, 4 * 100 * data->rshunt);
/* Clamp value to maximum value of long */
*val = clamp_val(power, 0, LONG_MAX);
break;
@@ -311,8 +395,8 @@ static int ina238_read_power(struct device *dev, u32 attr, long *val)
* Truncated 24-bit compare register, lower 8-bits are
* truncated. Same conversion to/from uW as POWER register.
*/
- power = div_u64((regval << 8) * 1000ULL * INA238_FIXED_SHUNT *
- data->gain, 20 * data->rshunt);
+ power = div_u64((regval << 8) * 1000ULL * INA238_FIXED_SHUNT * data->gain *
+ data->config->power_calculate_factor, 4 * 100 * data->rshunt);
/* Clamp value to maximum value of long */
*val = clamp_val(power, 0, LONG_MAX);
break;
@@ -344,8 +428,8 @@ static int ina238_write_power(struct device *dev, u32 attr, long val)
* register.
*/
regval = clamp_val(val, 0, LONG_MAX);
- regval = div_u64(val * 20ULL * data->rshunt,
- 1000ULL * INA238_FIXED_SHUNT * data->gain);
+	regval = div_u64(regval * 4 * 100 * data->rshunt, data->config->power_calculate_factor *
+			 1000ULL * INA238_FIXED_SHUNT * data->gain);
regval = clamp_val(regval >> 8, 0, U16_MAX);
return regmap_write(data->regmap, INA238_POWER_LIMIT, regval);
@@ -362,17 +446,17 @@ static int ina238_read_temp(struct device *dev, u32 attr, long *val)
err = regmap_read(data->regmap, INA238_DIE_TEMP, &regval);
if (err)
return err;
-
- /* Signed, bits 15-4 of register, result in mC */
- *val = ((s16)regval >> 4) * INA238_DIE_TEMP_LSB;
+ /* Signed, result in mC */
+ *val = div_s64(((s64)((s16)regval) >> data->config->temp_shift) *
+ (s64)data->config->temp_lsb, 10000);
break;
case hwmon_temp_max:
err = regmap_read(data->regmap, INA238_TEMP_LIMIT, &regval);
if (err)
return err;
-
- /* Signed, bits 15-4 of register, result in mC */
- *val = ((s16)regval >> 4) * INA238_DIE_TEMP_LSB;
+ /* Signed, result in mC */
+ *val = div_s64(((s64)((s16)regval) >> data->config->temp_shift) *
+ (s64)data->config->temp_lsb, 10000);
break;
case hwmon_temp_max_alarm:
err = regmap_read(data->regmap, INA238_DIAG_ALERT, &regval);
@@ -396,13 +480,33 @@ static int ina238_write_temp(struct device *dev, u32 attr, long val)
if (attr != hwmon_temp_max)
return -EOPNOTSUPP;
- /* Signed, bits 15-4 of register */
- regval = (val / INA238_DIE_TEMP_LSB) << 4;
- regval = clamp_val(regval, S16_MIN, S16_MAX) & 0xfff0;
+ /* Signed */
+ regval = clamp_val(val, -40000, 125000);
+	regval = div_s64((s64)regval * 10000, data->config->temp_lsb) << data->config->temp_shift;
+ regval = clamp_val(regval, S16_MIN, S16_MAX) & (0xffff << data->config->temp_shift);
return regmap_write(data->regmap, INA238_TEMP_LIMIT, regval);
}
+static ssize_t energy1_input_show(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct ina238_data *data = dev_get_drvdata(dev);
+ int ret;
+ u64 regval;
+ u64 energy;
+
+ ret = ina238_read_reg40(data->client, SQ52206_ENERGY, &regval);
+ if (ret)
+ return ret;
+
+ /* result in mJ */
+ energy = div_u64(regval * INA238_FIXED_SHUNT * data->gain * 16 *
+ data->config->power_calculate_factor, 4 * 100 * data->rshunt);
+
+ return sysfs_emit(buf, "%llu\n", energy);
+}
+
static int ina238_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val)
{
@@ -422,7 +526,7 @@ static int ina238_read(struct device *dev, enum hwmon_sensor_types type,
}
static int ina238_write(struct device *dev, enum hwmon_sensor_types type,
- u32 attr, int channel, long val)
+ u32 attr, int channel, long val)
{
struct ina238_data *data = dev_get_drvdata(dev);
int err;
@@ -452,6 +556,9 @@ static umode_t ina238_is_visible(const void *drvdata,
enum hwmon_sensor_types type,
u32 attr, int channel)
{
+ const struct ina238_data *data = drvdata;
+ bool has_power_highest = data->config->has_power_highest;
+
switch (type) {
case hwmon_in:
switch (attr) {
@@ -479,6 +586,10 @@ static umode_t ina238_is_visible(const void *drvdata,
return 0444;
case hwmon_power_max:
return 0644;
+ case hwmon_power_input_highest:
+ if (has_power_highest)
+ return 0444;
+ return 0;
default:
return 0;
}
@@ -512,7 +623,8 @@ static const struct hwmon_channel_info * const ina238_info[] = {
HWMON_C_INPUT),
HWMON_CHANNEL_INFO(power,
/* 0: power */
- HWMON_P_INPUT | HWMON_P_MAX | HWMON_P_MAX_ALARM),
+ HWMON_P_INPUT | HWMON_P_MAX |
+ HWMON_P_MAX_ALARM | HWMON_P_INPUT_HIGHEST),
HWMON_CHANNEL_INFO(temp,
/* 0: die temperature */
HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_ALARM),
@@ -530,20 +642,35 @@ static const struct hwmon_chip_info ina238_chip_info = {
.info = ina238_info,
};
+/* the energy register is 5 bytes (40 bits) wide, so it is read into a u64 */
+static DEVICE_ATTR_RO(energy1_input);
+
+static struct attribute *ina238_attrs[] = {
+ &dev_attr_energy1_input.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ina238);
+
static int ina238_probe(struct i2c_client *client)
{
struct ina2xx_platform_data *pdata = dev_get_platdata(&client->dev);
struct device *dev = &client->dev;
struct device *hwmon_dev;
struct ina238_data *data;
+ enum ina238_ids chip;
int config;
int ret;
+ chip = (uintptr_t)i2c_get_match_data(client);
+
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->client = client;
+	/* set the chip-specific configuration */
+ data->config = &ina238_config[chip];
+
mutex_init(&data->config_lock);
data->regmap = devm_regmap_init_i2c(client, &ina238_regmap_config);
@@ -564,15 +691,21 @@ static int ina238_probe(struct i2c_client *client)
/* load shunt gain value */
if (device_property_read_u32(dev, "ti,shunt-gain", &data->gain) < 0)
data->gain = 4; /* Default of ADCRANGE = 0 */
- if (data->gain != 1 && data->gain != 4) {
+ if (data->gain != 1 && data->gain != 2 && data->gain != 4) {
dev_err(dev, "invalid shunt gain value %u\n", data->gain);
return -EINVAL;
}
/* Setup CONFIG register */
- config = INA238_CONFIG_DEFAULT;
- if (data->gain == 1)
+ config = data->config->config_default;
+ if (chip == sq52206) {
+ if (data->gain == 1)
+ config |= SQ52206_CONFIG_ADCRANGE_HIGH; /* ADCRANGE = 10/11 is /1 */
+ else if (data->gain == 2)
+ config |= SQ52206_CONFIG_ADCRANGE_LOW; /* ADCRANGE = 01 is /2 */
+ } else if (data->gain == 1) {
config |= INA238_CONFIG_ADCRANGE; /* ADCRANGE = 1 is /1 */
+ }
ret = regmap_write(data->regmap, INA238_CONFIG, config);
if (ret < 0) {
dev_err(dev, "error configuring the device: %d\n", ret);
@@ -605,7 +738,8 @@ static int ina238_probe(struct i2c_client *client)
hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, data,
&ina238_chip_info,
- NULL);
+ data->config->has_energy ?
+ ina238_groups : NULL);
if (IS_ERR(hwmon_dev))
return PTR_ERR(hwmon_dev);
@@ -616,15 +750,27 @@ static int ina238_probe(struct i2c_client *client)
}
static const struct i2c_device_id ina238_id[] = {
- { "ina238" },
+ { "ina237", ina237 },
+ { "ina238", ina238 },
+ { "sq52206", sq52206 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ina238_id);
static const struct of_device_id __maybe_unused ina238_of_match[] = {
- { .compatible = "ti,ina237" },
- { .compatible = "ti,ina238" },
- { },
+ {
+ .compatible = "ti,ina237",
+ .data = (void *)ina237
+ },
+ {
+ .compatible = "ti,ina238",
+ .data = (void *)ina238
+ },
+ {
+ .compatible = "silergy,sq52206",
+ .data = (void *)sq52206
+ },
+ { }
};
MODULE_DEVICE_TABLE(of, ina238_of_match);
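
As a numeric check of the scaling comments in this file: with the driver defaults of rshunt = 10000 uOhm, gain = 4, the fixed 20000 uOhm calibration shunt, and the SQ52206 power factor of 0.24 (stored as 24 and divided by 100), a POWER register reading of 1000 converts to 480000 uW, i.e. 480 mW. A standalone worked example:

#include <stdint.h>
#include <stdio.h>

/* Worked example of the power scaling, mirroring the driver's
 * div_u64(regval * 1000 * 20000 * gain * factor, 4 * 100 * rshunt). */
int main(void)
{
	uint64_t regval = 1000;		/* raw POWER register value */
	uint64_t gain = 4, rshunt = 10000, factor = 24;
	uint64_t uW;

	uW = regval * 1000ULL * 20000 * gain * factor / (4 * 100 * rshunt);
	printf("%llu uW\n", (unsigned long long)uW);	/* 480000 */
	return 0;
}
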
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index 345fe7db9de9..bc3c1f7314b3 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -959,8 +959,12 @@ static int ina2xx_probe(struct i2c_client *client)
return PTR_ERR(data->regmap);
}
- ret = devm_regulator_get_enable(dev, "vs");
- if (ret)
+ /*
+	 * The regulator core returns -ENODEV if the 'vs' regulator is not
+	 * available, so that return code has to be tolerated here.
+ */
+ ret = devm_regulator_get_enable_optional(dev, "vs");
+ if (ret < 0 && ret != -ENODEV)
return dev_err_probe(dev, ret, "failed to enable vs regulator\n");
ret = ina2xx_init(dev, data);
diff --git a/drivers/hwmon/isl28022.c b/drivers/hwmon/isl28022.c
index 1fb9864635db..c2e559dde63f 100644
--- a/drivers/hwmon/isl28022.c
+++ b/drivers/hwmon/isl28022.c
@@ -154,6 +154,7 @@ static int isl28022_read_current(struct device *dev, u32 attr, long *val)
struct isl28022_data *data = dev_get_drvdata(dev);
unsigned int regval;
int err;
+ u16 sign_bit;
switch (attr) {
case hwmon_curr_input:
@@ -161,8 +162,9 @@ static int isl28022_read_current(struct device *dev, u32 attr, long *val)
ISL28022_REG_CURRENT, &regval);
if (err < 0)
return err;
- *val = ((long)regval * 1250L * (long)data->gain) /
- (long)data->shunt;
+ sign_bit = (regval >> 15) & 0x01;
+ *val = (((long)(((u16)regval) & 0x7FFF) - (sign_bit * 32768)) *
+ 1250L * (long)data->gain) / (long)data->shunt;
break;
default:
return -EOPNOTSUPP;
@@ -301,7 +303,7 @@ static const struct regmap_config isl28022_regmap_config = {
.writeable_reg = isl28022_is_writeable_reg,
.volatile_reg = isl28022_is_volatile_reg,
.val_format_endian = REGMAP_ENDIAN_BIG,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.use_single_read = true,
.use_single_write = true,
};
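
The current-register fix above reconstructs the sign of a 16-bit two's-complement value by hand; the arithmetic is equivalent to a plain signed 16-bit cast, as the standalone check below demonstrates (illustrative code, not the driver's):

#include <stdint.h>
#include <stdio.h>

/* Manual sign reconstruction, as in the driver, versus a cast. */
static long sign_extend_manual(unsigned int regval)
{
	unsigned int sign_bit = (regval >> 15) & 0x01;

	return (long)(regval & 0x7FFF) - (long)sign_bit * 32768;
}

int main(void)
{
	unsigned int raw = 0xFF38;	/* -200 in two's complement */

	printf("%ld == %d\n", sign_extend_manual(raw), (int16_t)raw);
	return 0;
}
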
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 472bcf6092f6..babf2413d666 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -503,6 +503,13 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
k10temp_get_ccd_support(data, 12);
break;
}
+ } else if (boot_cpu_data.x86 == 0x1a) {
+ switch (boot_cpu_data.x86_model) {
+ case 0x40 ... 0x4f: /* Zen5 Ryzen Desktop */
+ data->ccd_offset = 0x308;
+ k10temp_get_ccd_support(data, 8);
+ break;
+ }
}
for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) {
diff --git a/drivers/hwmon/kbatt.c b/drivers/hwmon/kbatt.c
new file mode 100644
index 000000000000..501b8f4ded33
--- /dev/null
+++ b/drivers/hwmon/kbatt.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 KEBA Industrial Automation GmbH
+ *
+ * Driver for KEBA battery monitoring controller FPGA IP core
+ */
+
+#include <linux/hwmon.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/misc/keba.h>
+#include <linux/mutex.h>
+
+#define KBATT "kbatt"
+
+#define KBATT_CONTROL_REG 0x4
+#define KBATT_CONTROL_BAT_TEST 0x01
+
+#define KBATT_STATUS_REG 0x8
+#define KBATT_STATUS_BAT_OK 0x01
+
+#define KBATT_MAX_UPD_INTERVAL (10 * HZ)
+#define KBATT_SETTLE_TIME_US (100 * USEC_PER_MSEC)
+
+struct kbatt {
+ /* update lock */
+ struct mutex lock;
+ void __iomem *base;
+
+ unsigned long next_update; /* in jiffies */
+ bool alarm;
+};
+
+static bool kbatt_alarm(struct kbatt *kbatt)
+{
+ mutex_lock(&kbatt->lock);
+
+ if (!kbatt->next_update || time_after(jiffies, kbatt->next_update)) {
+ /* switch load on */
+ iowrite8(KBATT_CONTROL_BAT_TEST,
+ kbatt->base + KBATT_CONTROL_REG);
+
+ /* wait some time to let things settle */
+ fsleep(KBATT_SETTLE_TIME_US);
+
+ /* check battery state */
+ if (ioread8(kbatt->base + KBATT_STATUS_REG) &
+ KBATT_STATUS_BAT_OK)
+ kbatt->alarm = false;
+ else
+ kbatt->alarm = true;
+
+ /* switch load off */
+ iowrite8(0, kbatt->base + KBATT_CONTROL_REG);
+
+ kbatt->next_update = jiffies + KBATT_MAX_UPD_INTERVAL;
+ }
+
+ mutex_unlock(&kbatt->lock);
+
+ return kbatt->alarm;
+}
+
+static int kbatt_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct kbatt *kbatt = dev_get_drvdata(dev);
+
+ *val = kbatt_alarm(kbatt) ? 1 : 0;
+
+ return 0;
+}
+
+static umode_t kbatt_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (channel == 0 && attr == hwmon_in_min_alarm)
+ return 0444;
+
+ return 0;
+}
+
+static const struct hwmon_channel_info *kbatt_info[] = {
+ HWMON_CHANNEL_INFO(in,
+ /* 0: input minimum alarm channel */
+ HWMON_I_MIN_ALARM),
+ NULL
+};
+
+static const struct hwmon_ops kbatt_hwmon_ops = {
+ .is_visible = kbatt_is_visible,
+ .read = kbatt_read,
+};
+
+static const struct hwmon_chip_info kbatt_chip_info = {
+ .ops = &kbatt_hwmon_ops,
+ .info = kbatt_info,
+};
+
+static int kbatt_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *id)
+{
+ struct keba_batt_auxdev *kbatt_auxdev =
+ container_of(auxdev, struct keba_batt_auxdev, auxdev);
+ struct device *dev = &auxdev->dev;
+ struct device *hwmon_dev;
+ struct kbatt *kbatt;
+ int retval;
+
+ kbatt = devm_kzalloc(dev, sizeof(*kbatt), GFP_KERNEL);
+ if (!kbatt)
+ return -ENOMEM;
+
+ retval = devm_mutex_init(dev, &kbatt->lock);
+ if (retval)
+ return retval;
+
+ kbatt->base = devm_ioremap_resource(dev, &kbatt_auxdev->io);
+ if (IS_ERR(kbatt->base))
+ return PTR_ERR(kbatt->base);
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, KBATT, kbatt,
+ &kbatt_chip_info,
+ NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct auxiliary_device_id kbatt_devtype_aux[] = {
+ { .name = "keba.batt" },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, kbatt_devtype_aux);
+
+static struct auxiliary_driver kbatt_driver_aux = {
+ .name = KBATT,
+ .id_table = kbatt_devtype_aux,
+ .probe = kbatt_probe,
+};
+module_auxiliary_driver(kbatt_driver_aux);
+
+MODULE_AUTHOR("Petar Bojanic <boja@keba.com>");
+MODULE_AUTHOR("Gerhard Engleder <eg@keba.com>");
+MODULE_DESCRIPTION("KEBA battery monitoring controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/kfan.c b/drivers/hwmon/kfan.c
new file mode 100644
index 000000000000..f353acb66749
--- /dev/null
+++ b/drivers/hwmon/kfan.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 KEBA Industrial Automation GmbH
+ *
+ * Driver for KEBA fan controller FPGA IP core
+ *
+ */
+
+#include <linux/hwmon.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/misc/keba.h>
+
+#define KFAN "kfan"
+
+#define KFAN_CONTROL_REG 0x04
+
+#define KFAN_STATUS_REG 0x08
+#define KFAN_STATUS_PRESENT 0x01
+#define KFAN_STATUS_REGULABLE 0x02
+#define KFAN_STATUS_TACHO 0x04
+#define KFAN_STATUS_BLOCKED 0x08
+
+#define KFAN_TACHO_REG 0x0c
+
+#define KFAN_DEFAULT_DIV 2
+
+struct kfan {
+ void __iomem *base;
+ bool tacho;
+ bool regulable;
+
+ /* hwmon API configuration */
+ u32 fan_channel_config[2];
+ struct hwmon_channel_info fan_info;
+ u32 pwm_channel_config[2];
+ struct hwmon_channel_info pwm_info;
+ const struct hwmon_channel_info *info[3];
+ struct hwmon_chip_info chip;
+};
+
+static bool kfan_get_fault(struct kfan *kfan)
+{
+ u8 status = ioread8(kfan->base + KFAN_STATUS_REG);
+
+ if (!(status & KFAN_STATUS_PRESENT))
+ return true;
+
+ if (!kfan->tacho && (status & KFAN_STATUS_BLOCKED))
+ return true;
+
+ return false;
+}
+
+static unsigned int kfan_count_to_rpm(u16 count)
+{
+ if (count == 0 || count == 0xffff)
+ return 0;
+
+ return 5000000UL / (KFAN_DEFAULT_DIV * count);
+}
+
+static unsigned int kfan_get_rpm(struct kfan *kfan)
+{
+ unsigned int rpm;
+ u16 count;
+
+ count = ioread16(kfan->base + KFAN_TACHO_REG);
+ rpm = kfan_count_to_rpm(count);
+
+ return rpm;
+}
+
+static unsigned int kfan_get_pwm(struct kfan *kfan)
+{
+ return ioread8(kfan->base + KFAN_CONTROL_REG);
+}
+
+static int kfan_set_pwm(struct kfan *kfan, long val)
+{
+ if (val < 0 || val > 0xff)
+ return -EINVAL;
+
+	/* if non-regulable, only 0 or 0xff can be written */
+ if (!kfan->regulable && val > 0)
+ val = 0xff;
+
+ iowrite8(val, kfan->base + KFAN_CONTROL_REG);
+
+ return 0;
+}
+
+static int kfan_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct kfan *kfan = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ return kfan_set_pwm(kfan, val);
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int kfan_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct kfan *kfan = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_fault:
+ *val = kfan_get_fault(kfan);
+ return 0;
+ case hwmon_fan_input:
+ *val = kfan_get_rpm(kfan);
+ return 0;
+ default:
+ break;
+ }
+ break;
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ *val = kfan_get_pwm(kfan);
+ return 0;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static umode_t kfan_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_input:
+ return 0444;
+ case hwmon_fan_fault:
+ return 0444;
+ default:
+ break;
+ }
+ break;
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ return 0644;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static const struct hwmon_ops kfan_hwmon_ops = {
+ .is_visible = kfan_is_visible,
+ .read = kfan_read,
+ .write = kfan_write,
+};
+
+static int kfan_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *id)
+{
+ struct keba_fan_auxdev *kfan_auxdev =
+ container_of(auxdev, struct keba_fan_auxdev, auxdev);
+ struct device *dev = &auxdev->dev;
+ struct device *hwmon_dev;
+ struct kfan *kfan;
+ u8 status;
+
+ kfan = devm_kzalloc(dev, sizeof(*kfan), GFP_KERNEL);
+ if (!kfan)
+ return -ENOMEM;
+
+ kfan->base = devm_ioremap_resource(dev, &kfan_auxdev->io);
+ if (IS_ERR(kfan->base))
+ return PTR_ERR(kfan->base);
+
+ status = ioread8(kfan->base + KFAN_STATUS_REG);
+ if (status & KFAN_STATUS_REGULABLE)
+ kfan->regulable = true;
+ if (status & KFAN_STATUS_TACHO)
+ kfan->tacho = true;
+
+ /* fan */
+ kfan->fan_channel_config[0] = HWMON_F_FAULT;
+ if (kfan->tacho)
+ kfan->fan_channel_config[0] |= HWMON_F_INPUT;
+ kfan->fan_info.type = hwmon_fan;
+ kfan->fan_info.config = kfan->fan_channel_config;
+ kfan->info[0] = &kfan->fan_info;
+
+ /* PWM */
+ kfan->pwm_channel_config[0] = HWMON_PWM_INPUT;
+ kfan->pwm_info.type = hwmon_pwm;
+ kfan->pwm_info.config = kfan->pwm_channel_config;
+ kfan->info[1] = &kfan->pwm_info;
+
+ kfan->chip.ops = &kfan_hwmon_ops;
+ kfan->chip.info = kfan->info;
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, KFAN, kfan,
+ &kfan->chip, NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct auxiliary_device_id kfan_devtype_aux[] = {
+ { .name = "keba.fan" },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, kfan_devtype_aux);
+
+static struct auxiliary_driver kfan_driver_aux = {
+ .name = KFAN,
+ .id_table = kfan_devtype_aux,
+ .probe = kfan_probe,
+};
+module_auxiliary_driver(kfan_driver_aux);
+
+MODULE_AUTHOR("Petar Bojanic <boja@keba.com>");
+MODULE_AUTHOR("Gerhard Engleder <eg@keba.com>");
+MODULE_DESCRIPTION("KEBA fan controller driver");
+MODULE_LICENSE("GPL");
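
The tacho conversion above divides a fixed reference count by the captured period. A minimal user-space sketch of the same math (assuming, as the 5000000UL constant suggests, a 5 MHz reference clock, and using the default divider of 2; kfan_count_to_rpm() is mirrored, everything around it is illustrative):

#include <stdio.h>

#define KFAN_DEFAULT_DIV 2

/* Mirror of kfan_count_to_rpm(): 0 and 0xffff mean "no valid capture" */
static unsigned int count_to_rpm(unsigned short count)
{
	if (count == 0 || count == 0xffff)
		return 0;

	return 5000000UL / (KFAN_DEFAULT_DIV * count);
}

int main(void)
{
	unsigned short samples[] = { 0, 500, 1250, 0xffff };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("count=%5u -> %u RPM\n", samples[i],
		       count_to_rpm(samples[i]));

	return 0;	/* 500 -> 5000 RPM, 1250 -> 2000 RPM, 0/0xffff -> 0 */
}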
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index d95a3c6c245c..9b4875e2fd8d 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -622,7 +622,7 @@ static int lm75_i3c_reg_read(void *context, unsigned int reg, unsigned int *val)
{
.rnw = true,
.len = 2,
- .data.out = data->val_buf,
+ .data.in = data->val_buf,
},
};
int ret;
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 75f09553fd67..c1f528e292f3 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -1235,7 +1235,7 @@ static int lm90_update_alarms(struct lm90_data *data, bool force)
static void lm90_alert_work(struct work_struct *__work)
{
- struct delayed_work *delayed_work = container_of(__work, struct delayed_work, work);
+ struct delayed_work *delayed_work = to_delayed_work(__work);
struct lm90_data *data = container_of(delayed_work, struct lm90_data, alert_work);
/* Nothing to do if alerts are enabled */
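
to_delayed_work() is just a container_of() wrapper, so the change above is behaviour-neutral. A self-contained sketch (the kernel macros are re-declared locally purely for illustration) shows the two-step recovery the alert handler performs:

#include <stddef.h>
#include <stdio.h>

/* Local stand-ins for the kernel definitions, for illustration only */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };
struct delayed_work { struct work_struct work; };

static struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

struct lm90_data { int flags; struct delayed_work alert_work; };

int main(void)
{
	struct lm90_data data = { .flags = 42 };
	struct work_struct *w = &data.alert_work.work;

	/* Same recovery chain lm90_alert_work() performs */
	struct delayed_work *dw = to_delayed_work(w);
	struct lm90_data *d = container_of(dw, struct lm90_data, alert_work);

	printf("recovered flags = %d\n", d->flags);	/* prints 42 */
	return 0;
}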
diff --git a/drivers/hwmon/ltc2992.c b/drivers/hwmon/ltc2992.c
index 541fa09dc6e7..a07e2eb93c71 100644
--- a/drivers/hwmon/ltc2992.c
+++ b/drivers/hwmon/ltc2992.c
@@ -256,33 +256,38 @@ static int ltc2992_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask
return 0;
}
-static void ltc2992_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int ltc2992_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct ltc2992_state *st = gpiochip_get_data(chip);
unsigned long gpio_ctrl;
- int reg;
+ int reg, ret;
mutex_lock(&st->gpio_mutex);
reg = ltc2992_read_reg(st, ltc2992_gpio_addr_map[offset].ctrl, 1);
if (reg < 0) {
mutex_unlock(&st->gpio_mutex);
- return;
+ return reg;
}
gpio_ctrl = reg;
assign_bit(ltc2992_gpio_addr_map[offset].ctrl_bit, &gpio_ctrl, value);
- ltc2992_write_reg(st, ltc2992_gpio_addr_map[offset].ctrl, 1, gpio_ctrl);
+ ret = ltc2992_write_reg(st, ltc2992_gpio_addr_map[offset].ctrl, 1,
+ gpio_ctrl);
mutex_unlock(&st->gpio_mutex);
+
+ return ret;
}
-static void ltc2992_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
- unsigned long *bits)
+static int ltc2992_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
{
struct ltc2992_state *st = gpiochip_get_data(chip);
unsigned long gpio_ctrl_io = 0;
unsigned long gpio_ctrl = 0;
unsigned int gpio_nr;
+ int ret;
for_each_set_bit(gpio_nr, mask, LTC2992_GPIO_NR) {
if (gpio_nr < 3)
@@ -293,9 +298,14 @@ static void ltc2992_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mas
}
mutex_lock(&st->gpio_mutex);
- ltc2992_write_reg(st, LTC2992_GPIO_IO_CTRL, 1, gpio_ctrl_io);
- ltc2992_write_reg(st, LTC2992_GPIO_CTRL, 1, gpio_ctrl);
+ ret = ltc2992_write_reg(st, LTC2992_GPIO_IO_CTRL, 1, gpio_ctrl_io);
+ if (ret)
+ goto out;
+
+ ret = ltc2992_write_reg(st, LTC2992_GPIO_CTRL, 1, gpio_ctrl);
+out:
mutex_unlock(&st->gpio_mutex);
+ return ret;
}
static int ltc2992_config_gpio(struct ltc2992_state *st)
@@ -329,8 +339,8 @@ static int ltc2992_config_gpio(struct ltc2992_state *st)
st->gc.ngpio = ARRAY_SIZE(st->gpio_names);
st->gc.get = ltc2992_gpio_get;
st->gc.get_multiple = ltc2992_gpio_get_multiple;
- st->gc.set = ltc2992_gpio_set;
- st->gc.set_multiple = ltc2992_gpio_set_multiple;
+ st->gc.set_rv = ltc2992_gpio_set;
+ st->gc.set_multiple_rv = ltc2992_gpio_set_multiple;
ret = devm_gpiochip_add_data(&st->client->dev, &st->gc, st);
if (ret)
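
The point of the conversion above is that register-write failures now reach gpiolib instead of being silently dropped. A small sketch of the two callback shapes, with a hypothetical write_reg() standing in for ltc2992_write_reg():

#include <errno.h>
#include <stdio.h>

/* Hypothetical register write that can fail, standing in for I2C access */
static int write_reg(unsigned int reg, unsigned int val)
{
	(void)val;
	return reg == 0x5a ? -EIO : 0;	/* pretend one register is broken */
}

/* Old shape: the error is computed, then thrown away */
static void gpio_set_void(unsigned int reg, unsigned int val)
{
	write_reg(reg, val);
}

/* New shape: the first failure is returned to the caller (gpiolib) */
static int gpio_set_rv(unsigned int reg, unsigned int val)
{
	return write_reg(reg, val);
}

int main(void)
{
	gpio_set_void(0x5a, 1);		/* caller cannot tell this failed */
	printf("set_rv -> %d\n", gpio_set_rv(0x5a, 1));	/* prints -5 (-EIO) */
	return 0;
}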
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
index 32b4d54b2076..a06346496e1d 100644
--- a/drivers/hwmon/max6639.c
+++ b/drivers/hwmon/max6639.c
@@ -80,6 +80,7 @@ struct max6639_data {
/* Register values initialized only once */
u8 ppr[MAX6639_NUM_CHANNELS]; /* Pulses per rotation 0..3 for 1..4 ppr */
u8 rpm_range[MAX6639_NUM_CHANNELS]; /* Index in above rpm_ranges table */
+ u32 target_rpm[MAX6639_NUM_CHANNELS];
/* Optional regulator for FAN supply */
struct regulator *reg;
@@ -563,6 +564,10 @@ static int max6639_probe_child_from_dt(struct i2c_client *client,
if (!err)
data->rpm_range[i] = rpm_range_to_reg(val);
+ err = of_property_read_u32(child, "target-rpm", &val);
+ if (!err)
+ data->target_rpm[i] = val;
+
return 0;
}
@@ -573,6 +578,7 @@ static int max6639_init_client(struct i2c_client *client,
const struct device_node *np = dev->of_node;
struct device_node *child;
int i, err;
+ u8 target_duty;
/* Reset chip to default values, see below for GCONFIG setup */
err = regmap_write(data->regmap, MAX6639_REG_GCONFIG, MAX6639_GCONFIG_POR);
@@ -586,6 +592,8 @@ static int max6639_init_client(struct i2c_client *client,
/* default: 4000 RPM */
data->rpm_range[0] = 1;
data->rpm_range[1] = 1;
+ data->target_rpm[0] = 4000;
+ data->target_rpm[1] = 4000;
for_each_child_of_node(np, child) {
if (strcmp(child->name, "fan"))
@@ -639,8 +647,12 @@ static int max6639_init_client(struct i2c_client *client,
if (err)
return err;
- /* PWM 120/120 (i.e. 100%) */
- err = regmap_write(data->regmap, MAX6639_REG_TARGTDUTY(i), 120);
+ /* Set PWM based on target RPM if specified */
+ if (data->target_rpm[i] > rpm_ranges[data->rpm_range[i]])
+ data->target_rpm[i] = rpm_ranges[data->rpm_range[i]];
+
+ target_duty = 120 * data->target_rpm[i] / rpm_ranges[data->rpm_range[i]];
+ err = regmap_write(data->regmap, MAX6639_REG_TARGTDUTY(i), target_duty);
if (err)
return err;
}
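
target_duty above scales the requested RPM linearly into the chip's 0..120 duty register (120 = 100%). A quick arithmetic sketch using the driver's 4000 RPM default range:

#include <stdio.h>

/* Mirrors the max6639 init logic: clamp to the range, then scale to 0..120 */
static unsigned int target_duty(unsigned int target_rpm, unsigned int rpm_range)
{
	if (target_rpm > rpm_range)
		target_rpm = rpm_range;

	return 120 * target_rpm / rpm_range;
}

int main(void)
{
	printf("%u\n", target_duty(4000, 4000));	/* 120 (100%) */
	printf("%u\n", target_duty(2000, 4000));	/* 60  (50%) */
	printf("%u\n", target_duty(9000, 4000));	/* 120 (clamped) */
	return 0;
}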
diff --git a/drivers/hwmon/max77705-hwmon.c b/drivers/hwmon/max77705-hwmon.c
new file mode 100644
index 000000000000..990023e6474e
--- /dev/null
+++ b/drivers/hwmon/max77705-hwmon.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MAX77705 voltage and current hwmon driver.
+ *
+ * Copyright (C) 2025 Dzmitry Sankouski <dsankouski@gmail.com>
+ */
+
+#include <linux/err.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/hwmon.h>
+#include <linux/kernel.h>
+#include <linux/mfd/max77705-private.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+struct channel_desc {
+ u8 reg;
+ u8 avg_reg;
+ const char *const label;
+ /* register resolution: nanovolts for voltage, nanoamperes for current */
+ u32 resolution;
+};
+
+static const struct channel_desc current_channel_desc[] = {
+ {
+ .reg = IIN_REG,
+ .label = "IIN_REG",
+ .resolution = 125000
+ },
+ {
+ .reg = ISYS_REG,
+ .avg_reg = AVGISYS_REG,
+ .label = "ISYS_REG",
+ .resolution = 312500
+ }
+};
+
+static const struct channel_desc voltage_channel_desc[] = {
+ {
+ .reg = VBYP_REG,
+ .label = "VBYP_REG",
+ .resolution = 427246
+ },
+ {
+ .reg = VSYS_REG,
+ .label = "VSYS_REG",
+ .resolution = 156250
+ }
+};
+
+static int max77705_read_and_convert(struct regmap *regmap, u8 reg, u32 res,
+ bool is_signed, long *val)
+{
+ int ret;
+ u32 regval;
+
+ ret = regmap_read(regmap, reg, &regval);
+ if (ret < 0)
+ return ret;
+
+ if (is_signed)
+ *val = mult_frac((long)sign_extend32(regval, 15), res, 1000000);
+ else
+ *val = mult_frac((long)regval, res, 1000000);
+
+ return 0;
+}
+
+static umode_t max77705_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ case hwmon_in_label:
+ return 0444;
+ default:
+ break;
+ }
+ break;
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+ case hwmon_curr_label:
+ return 0444;
+ case hwmon_curr_average:
+ if (current_channel_desc[channel].avg_reg)
+ return 0444;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int max77705_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, const char **buf)
+{
+ switch (type) {
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_label:
+ *buf = current_channel_desc[channel].label;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_label:
+ *buf = voltage_channel_desc[channel].label;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int max77705_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct regmap *regmap = dev_get_drvdata(dev);
+ u8 reg;
+ u32 res;
+
+ switch (type) {
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+ reg = current_channel_desc[channel].reg;
+ res = current_channel_desc[channel].resolution;
+
+ return max77705_read_and_convert(regmap, reg, res, true, val);
+ case hwmon_curr_average:
+ reg = current_channel_desc[channel].avg_reg;
+ res = current_channel_desc[channel].resolution;
+
+ return max77705_read_and_convert(regmap, reg, res, true, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ reg = voltage_channel_desc[channel].reg;
+ res = voltage_channel_desc[channel].resolution;
+
+ return max77705_read_and_convert(regmap, reg, res, false, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct hwmon_ops max77705_hwmon_ops = {
+ .is_visible = max77705_is_visible,
+ .read = max77705_read,
+ .read_string = max77705_read_string,
+};
+
+static const struct hwmon_channel_info *max77705_info[] = {
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL
+ ),
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_AVERAGE | HWMON_C_LABEL
+ ),
+ NULL
+};
+
+static const struct hwmon_chip_info max77705_chip_info = {
+ .ops = &max77705_hwmon_ops,
+ .info = max77705_info,
+};
+
+static int max77705_hwmon_probe(struct platform_device *pdev)
+{
+ struct device *hwmon_dev;
+ struct regmap *regmap;
+
+ regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!regmap)
+ return -ENODEV;
+
+ hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev, "max77705", regmap,
+ &max77705_chip_info, NULL);
+ if (IS_ERR(hwmon_dev))
+ return dev_err_probe(&pdev->dev, PTR_ERR(hwmon_dev),
+ "Unable to register hwmon device\n");
+
+ return 0;
+}
+
+static struct platform_driver max77705_hwmon_driver = {
+ .driver = {
+ .name = "max77705-hwmon",
+ },
+ .probe = max77705_hwmon_probe,
+};
+
+module_platform_driver(max77705_hwmon_driver);
+
+MODULE_AUTHOR("Dzmitry Sankouski <dsankouski@gmail.com>");
+MODULE_DESCRIPTION("MAX77705 monitor driver");
+MODULE_LICENSE("GPL");
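
The conversion helper above works in nano-units to keep precision: raw * resolution / 10^6 yields millivolts or milliamperes, with sign extension for the signed current registers. A user-space sketch (mult_frac() and sign_extend32() are re-declared locally for illustration; the 312500 nA/LSB ISYS resolution is taken from the table above):

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the kernel helpers, for illustration only */
#define mult_frac(x, n, d) (((x) / (d)) * (n) + (((x) % (d)) * (n)) / (d))

static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	uint32_t raw = 0xfff6;	/* 16-bit reading, -10 once sign-extended */
	long ma = mult_frac((long)sign_extend32(raw, 15), 312500, 1000000);

	printf("%ld mA\n", ma);	/* -10 * 312500 nA = -3.125 mA, truncated to -3 */
	return 0;
}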
diff --git a/drivers/hwmon/nct7363.c b/drivers/hwmon/nct7363.c
index be7bf32f6e68..e13ab918b1ab 100644
--- a/drivers/hwmon/nct7363.c
+++ b/drivers/hwmon/nct7363.c
@@ -391,7 +391,7 @@ static const struct regmap_config nct7363_regmap_config = {
.val_bits = 8,
.use_single_read = true,
.use_single_write = true,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.volatile_reg = nct7363_regmap_is_volatile,
};
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index c9b3c3149982..441f984a859d 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -218,6 +218,24 @@ config SENSORS_LM25066_REGULATOR
If you say yes here you get regulator support for National
Semiconductor LM25066, LM5064, and LM5066.
+config SENSORS_LT3074
+ tristate "Analog Devices LT3074"
+ help
+ If you say yes here you get hardware monitoring support for Analog
+ Devices LT3074.
+
+ This driver can also be built as a module. If so, the module will
+ be called lt3074.
+
+config SENSORS_LT3074_REGULATOR
+ tristate "Regulator support for LT3074"
+ depends on SENSORS_LT3074 && REGULATOR
+ help
+ If you say yes here you get regulator support for Analog Devices
+ LT3074. The LT3074 is a low voltage, ultralow noise, high PSRR,
+ dropout linear regulator. The device supplies up to 3A with a
+ typical dropout voltage of 45mV.
+
config SENSORS_LT7182S
tristate "Analog Devices LT7182S"
help
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index 56f128c4653e..29cd8a3317d2 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_SENSORS_IR38064) += ir38064.o
obj-$(CONFIG_SENSORS_IRPS5401) += irps5401.o
obj-$(CONFIG_SENSORS_ISL68137) += isl68137.o
obj-$(CONFIG_SENSORS_LM25066) += lm25066.o
+obj-$(CONFIG_SENSORS_LT3074) += lt3074.o
obj-$(CONFIG_SENSORS_LT7182S) += lt7182s.o
obj-$(CONFIG_SENSORS_LTC2978) += ltc2978.o
obj-$(CONFIG_SENSORS_LTC3815) += ltc3815.o
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
index 40b0dda32ea6..dd7275a67a0a 100644
--- a/drivers/hwmon/pmbus/lm25066.c
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -437,7 +437,7 @@ static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
#if IS_ENABLED(CONFIG_SENSORS_LM25066_REGULATOR)
static const struct regulator_desc lm25066_reg_desc[] = {
- PMBUS_REGULATOR_ONE("vout"),
+ PMBUS_REGULATOR_ONE_NODE("vout"),
};
#endif
diff --git a/drivers/hwmon/pmbus/lt3074.c b/drivers/hwmon/pmbus/lt3074.c
new file mode 100644
index 000000000000..3704dbe7b54a
--- /dev/null
+++ b/drivers/hwmon/pmbus/lt3074.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hardware monitoring driver for Analog Devices LT3074
+ *
+ * Copyright (C) 2025 Analog Devices, Inc.
+ */
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+
+#include "pmbus.h"
+
+#define LT3074_MFR_READ_VBIAS 0xc6
+#define LT3074_MFR_BIAS_OV_WARN_LIMIT 0xc7
+#define LT3074_MFR_BIAS_UV_WARN_LIMIT 0xc8
+#define LT3074_MFR_SPECIAL_ID 0xe7
+
+#define LT3074_SPECIAL_ID_VALUE 0x1c1d
+
+static const struct regulator_desc __maybe_unused lt3074_reg_desc[] = {
+ PMBUS_REGULATOR_ONE("regulator"),
+};
+
+static int lt3074_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
+{
+ switch (reg) {
+ case PMBUS_VIRT_READ_VMON:
+ return pmbus_read_word_data(client, page, phase,
+ LT3074_MFR_READ_VBIAS);
+ case PMBUS_VIRT_VMON_UV_WARN_LIMIT:
+ return pmbus_read_word_data(client, page, phase,
+ LT3074_MFR_BIAS_UV_WARN_LIMIT);
+ case PMBUS_VIRT_VMON_OV_WARN_LIMIT:
+ return pmbus_read_word_data(client, page, phase,
+ LT3074_MFR_BIAS_OV_WARN_LIMIT);
+ default:
+ return -ENODATA;
+ }
+}
+
+static int lt3074_write_word_data(struct i2c_client *client, int page,
+ int reg, u16 word)
+{
+ switch (reg) {
+ case PMBUS_VIRT_VMON_UV_WARN_LIMIT:
+ return pmbus_write_word_data(client, 0,
+ LT3074_MFR_BIAS_UV_WARN_LIMIT,
+ word);
+ case PMBUS_VIRT_VMON_OV_WARN_LIMIT:
+ return pmbus_write_word_data(client, 0,
+ LT3074_MFR_BIAS_OV_WARN_LIMIT,
+ word);
+ default:
+ return -ENODATA;
+ }
+}
+
+static struct pmbus_driver_info lt3074_info = {
+ .pages = 1,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = linear,
+ .format[PSC_CURRENT_OUT] = linear,
+ .format[PSC_TEMPERATURE] = linear,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_VMON |
+ PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_STATUS_TEMP,
+ .read_word_data = lt3074_read_word_data,
+ .write_word_data = lt3074_write_word_data,
+#if IS_ENABLED(CONFIG_SENSORS_LT3074_REGULATOR)
+ .num_regulators = 1,
+ .reg_desc = lt3074_reg_desc,
+#endif
+};
+
+static int lt3074_probe(struct i2c_client *client)
+{
+ int ret;
+ struct device *dev = &client->dev;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_WORD_DATA))
+ return -ENODEV;
+
+ ret = i2c_smbus_read_word_data(client, LT3074_MFR_SPECIAL_ID);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to read ID\n");
+
+ if (ret != LT3074_SPECIAL_ID_VALUE)
+ return dev_err_probe(dev, -ENODEV, "ID mismatch\n");
+
+ return pmbus_do_probe(client, &lt3074_info);
+}
+
+static const struct i2c_device_id lt3074_id[] = {
+ { "lt3074", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, lt3074_id);
+
+static const struct of_device_id __maybe_unused lt3074_of_match[] = {
+ { .compatible = "adi,lt3074" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, lt3074_of_match);
+
+static struct i2c_driver lt3074_driver = {
+ .driver = {
+ .name = "lt3074",
+ .of_match_table = of_match_ptr(lt3074_of_match),
+ },
+ .probe = lt3074_probe,
+ .id_table = lt3074_id,
+};
+module_i2c_driver(lt3074_driver);
+
+MODULE_AUTHOR("Cedric Encarnacion <cedricjustine.encarnacion@analog.com>");
+MODULE_DESCRIPTION("PMBus driver for Analog Devices LT3074");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("PMBUS");
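
One detail worth noting in the read/write hooks above: returning -ENODATA tells the PMBus core "not handled here, do a plain register access", while any other error aborts the transaction. A sketch of that dispatch, with hypothetical register numbers (only the 0xc6 VBIAS address is from the driver):

#include <errno.h>
#include <stdio.h>

#define REG_STANDARD	0x8b	/* hypothetical: readable directly */
#define REG_VIRT_VMON	0x200	/* hypothetical: must be remapped */
#define MFR_READ_VBIAS	0xc6	/* LT3074_MFR_READ_VBIAS from the driver */

static int hw_read(int reg)	/* pretend bus access */
{
	return reg == MFR_READ_VBIAS ? 1234 : 5678;
}

/* Driver hook: map virtual registers; -ENODATA means "not mine" */
static int driver_read_word(int reg)
{
	if (reg == REG_VIRT_VMON)
		return hw_read(MFR_READ_VBIAS);

	return -ENODATA;
}

/* Core logic: try the driver first, fall back to a direct read */
static int core_read_word(int reg)
{
	int ret = driver_read_word(reg);

	return ret == -ENODATA ? hw_read(reg) : ret;
}

int main(void)
{
	printf("vmon = %d\n", core_read_word(REG_VIRT_VMON));	/* 1234 */
	printf("std  = %d\n", core_read_word(REG_STANDARD));	/* 5678 */
	return 0;
}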
diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c
index c9dda33831ff..56834d26f8ef 100644
--- a/drivers/hwmon/pmbus/max34440.c
+++ b/drivers/hwmon/pmbus/max34440.c
@@ -12,9 +12,26 @@
#include <linux/init.h>
#include <linux/err.h>
#include <linux/i2c.h>
+#include <linux/delay.h>
#include "pmbus.h"
-enum chips { max34440, max34441, max34446, max34451, max34460, max34461 };
+enum chips {
+ adpm12160,
+ max34440,
+ max34441,
+ max34446,
+ max34451,
+ max34460,
+ max34461,
+};
+
+/*
+ * Firmware is sometimes not ready if we try to read data from a
+ * page immediately after selecting it. Maxim recommends a 50us
+ * delay because the chip fails to clock stretch long enough
+ * here.
+ */
+#define MAX34440_PAGE_CHANGE_DELAY 50
#define MAX34440_MFR_VOUT_PEAK 0xd4
#define MAX34440_MFR_IOUT_PEAK 0xd5
@@ -34,16 +51,21 @@ enum chips { max34440, max34441, max34446, max34451, max34460, max34461 };
/*
* The whole max344* family have IOUT_OC_WARN_LIMIT and IOUT_OC_FAULT_LIMIT
* swapped from the standard pmbus spec addresses.
+ * For max34451, version MAX34451ETNA6+ and later has this issue fixed.
*/
#define MAX34440_IOUT_OC_WARN_LIMIT 0x46
#define MAX34440_IOUT_OC_FAULT_LIMIT 0x4A
+#define MAX34451ETNA6_MFR_REV 0x0012
+
#define MAX34451_MFR_CHANNEL_CONFIG 0xe4
#define MAX34451_MFR_CHANNEL_CONFIG_SEL_MASK 0x3f
struct max34440_data {
int id;
struct pmbus_driver_info info;
+ u8 iout_oc_warn_limit;
+ u8 iout_oc_fault_limit;
};
#define to_max34440_data(x) container_of(x, struct max34440_data, info)
@@ -60,11 +82,11 @@ static int max34440_read_word_data(struct i2c_client *client, int page,
switch (reg) {
case PMBUS_IOUT_OC_FAULT_LIMIT:
ret = pmbus_read_word_data(client, page, phase,
- MAX34440_IOUT_OC_FAULT_LIMIT);
+ data->iout_oc_fault_limit);
break;
case PMBUS_IOUT_OC_WARN_LIMIT:
ret = pmbus_read_word_data(client, page, phase,
- MAX34440_IOUT_OC_WARN_LIMIT);
+ data->iout_oc_warn_limit);
break;
case PMBUS_VIRT_READ_VOUT_MIN:
ret = pmbus_read_word_data(client, page, phase,
@@ -75,7 +97,8 @@ static int max34440_read_word_data(struct i2c_client *client, int page,
MAX34440_MFR_VOUT_PEAK);
break;
case PMBUS_VIRT_READ_IOUT_AVG:
- if (data->id != max34446 && data->id != max34451)
+ if (data->id != max34446 && data->id != max34451 &&
+ data->id != adpm12160)
return -ENXIO;
ret = pmbus_read_word_data(client, page, phase,
MAX34446_MFR_IOUT_AVG);
@@ -133,11 +156,11 @@ static int max34440_write_word_data(struct i2c_client *client, int page,
switch (reg) {
case PMBUS_IOUT_OC_FAULT_LIMIT:
- ret = pmbus_write_word_data(client, page, MAX34440_IOUT_OC_FAULT_LIMIT,
+ ret = pmbus_write_word_data(client, page, data->iout_oc_fault_limit,
word);
break;
case PMBUS_IOUT_OC_WARN_LIMIT:
- ret = pmbus_write_word_data(client, page, MAX34440_IOUT_OC_WARN_LIMIT,
+ ret = pmbus_write_word_data(client, page, data->iout_oc_warn_limit,
word);
break;
case PMBUS_VIRT_RESET_POUT_HISTORY:
@@ -159,7 +182,8 @@ static int max34440_write_word_data(struct i2c_client *client, int page,
case PMBUS_VIRT_RESET_IOUT_HISTORY:
ret = pmbus_write_word_data(client, page,
MAX34440_MFR_IOUT_PEAK, 0);
- if (!ret && (data->id == max34446 || data->id == max34451))
+ if (!ret && (data->id == max34446 || data->id == max34451 ||
+ data->id == adpm12160))
ret = pmbus_write_word_data(client, page,
MAX34446_MFR_IOUT_AVG, 0);
@@ -235,9 +259,29 @@ static int max34451_set_supported_funcs(struct i2c_client *client,
*/
int page, rv;
+ bool max34451_na6 = false;
+
+ rv = i2c_smbus_read_word_data(client, PMBUS_MFR_REVISION);
+ if (rv < 0)
+ return rv;
+
+ if (rv >= MAX34451ETNA6_MFR_REV) {
+ max34451_na6 = true;
+ data->info.format[PSC_VOLTAGE_IN] = direct;
+ data->info.format[PSC_CURRENT_IN] = direct;
+ data->info.m[PSC_VOLTAGE_IN] = 1;
+ data->info.b[PSC_VOLTAGE_IN] = 0;
+ data->info.R[PSC_VOLTAGE_IN] = 3;
+ data->info.m[PSC_CURRENT_IN] = 1;
+ data->info.b[PSC_CURRENT_IN] = 0;
+ data->info.R[PSC_CURRENT_IN] = 2;
+ data->iout_oc_fault_limit = PMBUS_IOUT_OC_FAULT_LIMIT;
+ data->iout_oc_warn_limit = PMBUS_IOUT_OC_WARN_LIMIT;
+ }
for (page = 0; page < 16; page++) {
rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+ fsleep(MAX34440_PAGE_CHANGE_DELAY);
if (rv < 0)
return rv;
@@ -251,16 +295,30 @@ static int max34451_set_supported_funcs(struct i2c_client *client,
case 0x20:
data->info.func[page] = PMBUS_HAVE_VOUT |
PMBUS_HAVE_STATUS_VOUT;
+
+ if (max34451_na6)
+ data->info.func[page] |= PMBUS_HAVE_VIN |
+ PMBUS_HAVE_STATUS_INPUT;
break;
case 0x21:
data->info.func[page] = PMBUS_HAVE_VOUT;
+
+ if (max34451_na6)
+ data->info.func[page] |= PMBUS_HAVE_VIN;
break;
case 0x22:
data->info.func[page] = PMBUS_HAVE_IOUT |
PMBUS_HAVE_STATUS_IOUT;
+
+ if (max34451_na6)
+ data->info.func[page] |= PMBUS_HAVE_IIN |
+ PMBUS_HAVE_STATUS_INPUT;
break;
case 0x23:
data->info.func[page] = PMBUS_HAVE_IOUT;
+
+ if (max34451_na6)
+ data->info.func[page] |= PMBUS_HAVE_IIN;
break;
default:
break;
@@ -271,6 +329,41 @@ static int max34451_set_supported_funcs(struct i2c_client *client,
}
static struct pmbus_driver_info max34440_info[] = {
+ [adpm12160] = {
+ .pages = 19,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_CURRENT_IN] = direct,
+ .format[PSC_CURRENT_OUT] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_VOLTAGE_IN] = 1,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = 0,
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 0,
+ .m[PSC_CURRENT_IN] = 1,
+ .b[PSC_CURRENT_IN] = 0,
+ .R[PSC_CURRENT_IN] = 2,
+ .m[PSC_CURRENT_OUT] = 1,
+ .b[PSC_CURRENT_OUT] = 0,
+ .R[PSC_CURRENT_OUT] = 2,
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 2,
+ /* pages below [18] without a func entry are not used for monitoring */
+ .func[2] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[4] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[5] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[6] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[7] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[8] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[9] = PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT,
+ .func[10] = PMBUS_HAVE_IIN | PMBUS_HAVE_STATUS_INPUT,
+ .func[18] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .read_word_data = max34440_read_word_data,
+ .write_word_data = max34440_write_word_data,
+ },
[max34440] = {
.pages = 14,
.format[PSC_VOLTAGE_IN] = direct,
@@ -312,6 +405,7 @@ static struct pmbus_driver_info max34440_info[] = {
.read_byte_data = max34440_read_byte_data,
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
+ .page_change_delay = MAX34440_PAGE_CHANGE_DELAY,
},
[max34441] = {
.pages = 12,
@@ -355,6 +449,7 @@ static struct pmbus_driver_info max34440_info[] = {
.read_byte_data = max34440_read_byte_data,
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
+ .page_change_delay = MAX34440_PAGE_CHANGE_DELAY,
},
[max34446] = {
.pages = 7,
@@ -392,6 +487,7 @@ static struct pmbus_driver_info max34440_info[] = {
.read_byte_data = max34440_read_byte_data,
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
+ .page_change_delay = MAX34440_PAGE_CHANGE_DELAY,
},
[max34451] = {
.pages = 21,
@@ -415,6 +511,7 @@ static struct pmbus_driver_info max34440_info[] = {
.func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
+ .page_change_delay = MAX34440_PAGE_CHANGE_DELAY,
},
[max34460] = {
.pages = 18,
@@ -445,6 +542,7 @@ static struct pmbus_driver_info max34440_info[] = {
.func[17] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
+ .page_change_delay = MAX34440_PAGE_CHANGE_DELAY,
},
[max34461] = {
.pages = 23,
@@ -480,6 +578,7 @@ static struct pmbus_driver_info max34440_info[] = {
.func[21] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
+ .page_change_delay = MAX34440_PAGE_CHANGE_DELAY,
},
};
@@ -494,17 +593,23 @@ static int max34440_probe(struct i2c_client *client)
return -ENOMEM;
data->id = i2c_match_id(max34440_id, client)->driver_data;
data->info = max34440_info[data->id];
+ data->iout_oc_fault_limit = MAX34440_IOUT_OC_FAULT_LIMIT;
+ data->iout_oc_warn_limit = MAX34440_IOUT_OC_WARN_LIMIT;
if (data->id == max34451) {
rv = max34451_set_supported_funcs(client, data);
if (rv)
return rv;
+ } else if (data->id == adpm12160) {
+ data->iout_oc_fault_limit = PMBUS_IOUT_OC_FAULT_LIMIT;
+ data->iout_oc_warn_limit = PMBUS_IOUT_OC_WARN_LIMIT;
}
return pmbus_do_probe(client, &data->info);
}
static const struct i2c_device_id max34440_id[] = {
+ {"adpm12160", adpm12160},
{"max34440", max34440},
{"max34441", max34441},
{"max34446", max34446},
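
All ADPM12160 values above use the PMBus direct format, real = (Y * 10^-R - b) / m. A simplified decode (positive R only, truncating division; the milli-unit scaling is an illustrative choice):

#include <stdio.h>

/* PMBus direct decode, scaled to milli-units: real = (Y * 10^-R - b) / m */
static long direct_to_milli(int y, int m, int b, int R)
{
	long val = (long)y * 1000;

	while (R-- > 0)
		val /= 10;

	return (val - (long)b * 1000) / m;
}

int main(void)
{
	/* ADPM12160 IOUT coefficients from the table above: m=1, b=0, R=2 */
	printf("%ld mA\n", direct_to_milli(1250, 1, 0, 2));	/* 12500 mA = 12.5 A */
	return 0;
}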
diff --git a/drivers/hwmon/pmbus/mpq7932.c b/drivers/hwmon/pmbus/mpq7932.c
index c1e2d0cb2fd0..8f10e37a7a76 100644
--- a/drivers/hwmon/pmbus/mpq7932.c
+++ b/drivers/hwmon/pmbus/mpq7932.c
@@ -51,8 +51,8 @@ static const struct regulator_desc mpq7932_regulators_desc[] = {
};
static const struct regulator_desc mpq7932_regulators_desc_one[] = {
- PMBUS_REGULATOR_STEP_ONE("buck", MPQ7932_N_VOLTAGES,
- MPQ7932_UV_STEP, MPQ7932_BUCK_UV_MIN),
+ PMBUS_REGULATOR_STEP_ONE_NODE("buck", MPQ7932_N_VOLTAGES,
+ MPQ7932_UV_STEP, MPQ7932_BUCK_UV_MIN),
};
#endif
diff --git a/drivers/hwmon/pmbus/mpq8785.c b/drivers/hwmon/pmbus/mpq8785.c
index 331c274ca892..1f56aaf4dde8 100644
--- a/drivers/hwmon/pmbus/mpq8785.c
+++ b/drivers/hwmon/pmbus/mpq8785.c
@@ -4,10 +4,23 @@
*/
#include <linux/i2c.h>
+#include <linux/bitops.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/of_device.h>
#include "pmbus.h"
+#define MPM82504_READ_TEMPERATURE_1_SIGN_POS 9
+
+enum chips { mpm3695, mpm3695_25, mpm82504, mpq8785 };
+
+static u16 voltage_scale_loop_max_val[] = {
+ [mpm3695] = GENMASK(9, 0),
+ [mpm3695_25] = GENMASK(11, 0),
+ [mpm82504] = GENMASK(9, 0),
+ [mpq8785] = GENMASK(10, 0),
+};
+
static int mpq8785_identify(struct i2c_client *client,
struct pmbus_driver_info *info)
{
@@ -34,6 +47,20 @@ static int mpq8785_identify(struct i2c_client *client,
return 0;
};
+static int mpm82504_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
+{
+ int ret;
+
+ ret = pmbus_read_word_data(client, page, phase, reg);
+
+ if (ret < 0 || reg != PMBUS_READ_TEMPERATURE_1)
+ return ret;
+
+ /* Fix PMBUS_READ_TEMPERATURE_1 signedness */
+ return sign_extend32(ret, MPM82504_READ_TEMPERATURE_1_SIGN_POS) & 0xffff;
+}
+
static struct pmbus_driver_info mpq8785_info = {
.pages = 1,
.format[PSC_VOLTAGE_IN] = direct,
@@ -53,26 +80,74 @@ static struct pmbus_driver_info mpq8785_info = {
PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
- .identify = mpq8785_identify,
-};
-
-static int mpq8785_probe(struct i2c_client *client)
-{
- return pmbus_do_probe(client, &mpq8785_info);
};
static const struct i2c_device_id mpq8785_id[] = {
- { "mpq8785" },
+ { "mpm3695", mpm3695 },
+ { "mpm3695-25", mpm3695_25 },
+ { "mpm82504", mpm82504 },
+ { "mpq8785", mpq8785 },
{ },
};
MODULE_DEVICE_TABLE(i2c, mpq8785_id);
static const struct of_device_id __maybe_unused mpq8785_of_match[] = {
- { .compatible = "mps,mpq8785" },
+ { .compatible = "mps,mpm3695", .data = (void *)mpm3695 },
+ { .compatible = "mps,mpm3695-25", .data = (void *)mpm3695_25 },
+ { .compatible = "mps,mpm82504", .data = (void *)mpm82504 },
+ { .compatible = "mps,mpq8785", .data = (void *)mpq8785 },
{}
};
MODULE_DEVICE_TABLE(of, mpq8785_of_match);
+static int mpq8785_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct pmbus_driver_info *info;
+ enum chips chip_id;
+ u32 voltage_scale;
+ int ret;
+
+ info = devm_kmemdup(dev, &mpq8785_info, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ if (dev->of_node)
+ chip_id = (kernel_ulong_t)of_device_get_match_data(dev);
+ else
+ chip_id = (kernel_ulong_t)i2c_get_match_data(client);
+
+ switch (chip_id) {
+ case mpm3695:
+ case mpm3695_25:
+ case mpm82504:
+ info->format[PSC_VOLTAGE_OUT] = direct;
+ info->m[PSC_VOLTAGE_OUT] = 8;
+ info->b[PSC_VOLTAGE_OUT] = 0;
+ info->R[PSC_VOLTAGE_OUT] = 2;
+ info->read_word_data = mpm82504_read_word_data;
+ break;
+ case mpq8785:
+ info->identify = mpq8785_identify;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ if (!device_property_read_u32(dev, "mps,vout-fb-divider-ratio-permille",
+ &voltage_scale)) {
+ if (voltage_scale > voltage_scale_loop_max_val[chip_id])
+ return -EINVAL;
+
+ ret = i2c_smbus_write_word_data(client, PMBUS_VOUT_SCALE_LOOP,
+ voltage_scale);
+ if (ret)
+ return ret;
+ }
+
+ return pmbus_do_probe(client, info);
+}
+
static struct i2c_driver mpq8785_driver = {
.driver = {
.name = "mpq8785",
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index ddb19c9726d6..d2e9bfb5320f 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -482,6 +482,7 @@ struct pmbus_driver_info {
*/
int access_delay; /* in microseconds */
int write_delay; /* in microseconds */
+ int page_change_delay; /* in microseconds */
};
/* Regulator ops */
@@ -508,11 +509,11 @@ int pmbus_regulator_init_cb(struct regulator_dev *rdev,
#define PMBUS_REGULATOR(_name, _id) PMBUS_REGULATOR_STEP(_name, _id, 0, 0, 0)
-#define PMBUS_REGULATOR_STEP_ONE(_name, _voltages, _step, _min_uV) \
+#define __PMBUS_REGULATOR_STEP_ONE(_name, _node, _voltages, _step, _min_uV) \
{ \
.name = (_name), \
.of_match = of_match_ptr(_name), \
- .regulators_node = of_match_ptr("regulators"), \
+ .regulators_node = of_match_ptr(_node), \
.ops = &pmbus_regulator_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
@@ -522,7 +523,19 @@ int pmbus_regulator_init_cb(struct regulator_dev *rdev,
.init_cb = pmbus_regulator_init_cb, \
}
-#define PMBUS_REGULATOR_ONE(_name) PMBUS_REGULATOR_STEP_ONE(_name, 0, 0, 0)
+/*
+ * _NODE macros are defined for historic reasons and MUST NOT be used in new
+ * drivers.
+ */
+#define PMBUS_REGULATOR_STEP_ONE_NODE(_name, _voltages, _step, _min_uV) \
+ __PMBUS_REGULATOR_STEP_ONE(_name, "regulators", _voltages, _step, _min_uV)
+
+#define PMBUS_REGULATOR_ONE_NODE(_name) PMBUS_REGULATOR_STEP_ONE_NODE(_name, 0, 0, 0)
+
+#define PMBUS_REGULATOR_STEP_ONE(_name, _voltages, _step, _min_uV) \
+ __PMBUS_REGULATOR_STEP_ONE(_name, NULL, _voltages, _step, _min_uV)
+
+#define PMBUS_REGULATOR_ONE(_name) PMBUS_REGULATOR_STEP_ONE(_name, 0, 0, 0)
/* Function declarations */
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index cfeba2e4c5c3..be6d05def115 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -32,6 +32,13 @@
#define PMBUS_ATTR_ALLOC_SIZE 32
#define PMBUS_NAME_SIZE 24
+/*
+ * Operation type flags, used to pick the delay required before the
+ * next pmbus operation.
+ */
+#define PMBUS_OP_WRITE BIT(0)
+#define PMBUS_OP_PAGE_CHANGE BIT(1)
+
static int wp = -1;
module_param(wp, int, 0444);
@@ -113,8 +120,8 @@ struct pmbus_data {
int vout_low[PMBUS_PAGES]; /* voltage low margin */
int vout_high[PMBUS_PAGES]; /* voltage high margin */
- ktime_t write_time; /* Last SMBUS write timestamp */
- ktime_t access_time; /* Last SMBUS access timestamp */
+
+ ktime_t next_access_backoff; /* Wait until at least this time */
};
struct pmbus_debugfs_entry {
@@ -169,32 +176,26 @@ EXPORT_SYMBOL_NS_GPL(pmbus_set_update, "PMBUS");
static void pmbus_wait(struct i2c_client *client)
{
struct pmbus_data *data = i2c_get_clientdata(client);
- const struct pmbus_driver_info *info = data->info;
- s64 delta;
+ s64 delay = ktime_us_delta(data->next_access_backoff, ktime_get());
- if (info->access_delay) {
- delta = ktime_us_delta(ktime_get(), data->access_time);
-
- if (delta < info->access_delay)
- fsleep(info->access_delay - delta);
- } else if (info->write_delay) {
- delta = ktime_us_delta(ktime_get(), data->write_time);
-
- if (delta < info->write_delay)
- fsleep(info->write_delay - delta);
- }
+ if (delay > 0)
+ fsleep(delay);
}
-/* Sets the last accessed timestamp for pmbus_wait */
-static void pmbus_update_ts(struct i2c_client *client, bool write_op)
+/* Sets the last operation timestamp for pmbus_wait */
+static void pmbus_update_ts(struct i2c_client *client, int op)
{
struct pmbus_data *data = i2c_get_clientdata(client);
const struct pmbus_driver_info *info = data->info;
+ int delay = info->access_delay;
+
+ if (op & PMBUS_OP_WRITE)
+ delay = max(delay, info->write_delay);
+ if (op & PMBUS_OP_PAGE_CHANGE)
+ delay = max(delay, info->page_change_delay);
- if (info->access_delay)
- data->access_time = ktime_get();
- else if (info->write_delay && write_op)
- data->write_time = ktime_get();
+ if (delay > 0)
+ data->next_access_backoff = ktime_add_us(ktime_get(), delay);
}
int pmbus_set_page(struct i2c_client *client, int page, int phase)
@@ -209,13 +210,13 @@ int pmbus_set_page(struct i2c_client *client, int page, int phase)
data->info->pages > 1 && page != data->currpage) {
pmbus_wait(client);
rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
- pmbus_update_ts(client, true);
+ pmbus_update_ts(client, PMBUS_OP_WRITE | PMBUS_OP_PAGE_CHANGE);
if (rv < 0)
return rv;
pmbus_wait(client);
rv = i2c_smbus_read_byte_data(client, PMBUS_PAGE);
- pmbus_update_ts(client, false);
+ pmbus_update_ts(client, 0);
if (rv < 0)
return rv;
@@ -229,7 +230,7 @@ int pmbus_set_page(struct i2c_client *client, int page, int phase)
pmbus_wait(client);
rv = i2c_smbus_write_byte_data(client, PMBUS_PHASE,
phase);
- pmbus_update_ts(client, true);
+ pmbus_update_ts(client, PMBUS_OP_WRITE);
if (rv)
return rv;
}
@@ -249,7 +250,7 @@ int pmbus_write_byte(struct i2c_client *client, int page, u8 value)
pmbus_wait(client);
rv = i2c_smbus_write_byte(client, value);
- pmbus_update_ts(client, true);
+ pmbus_update_ts(client, PMBUS_OP_WRITE);
return rv;
}
@@ -284,7 +285,7 @@ int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg,
pmbus_wait(client);
rv = i2c_smbus_write_word_data(client, reg, word);
- pmbus_update_ts(client, true);
+ pmbus_update_ts(client, PMBUS_OP_WRITE);
return rv;
}
@@ -405,7 +406,7 @@ int pmbus_read_word_data(struct i2c_client *client, int page, int phase, u8 reg)
pmbus_wait(client);
rv = i2c_smbus_read_word_data(client, reg);
- pmbus_update_ts(client, false);
+ pmbus_update_ts(client, 0);
return rv;
}
@@ -468,7 +469,7 @@ int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg)
pmbus_wait(client);
rv = i2c_smbus_read_byte_data(client, reg);
- pmbus_update_ts(client, false);
+ pmbus_update_ts(client, 0);
return rv;
}
@@ -484,7 +485,7 @@ int pmbus_write_byte_data(struct i2c_client *client, int page, u8 reg, u8 value)
pmbus_wait(client);
rv = i2c_smbus_write_byte_data(client, reg, value);
- pmbus_update_ts(client, true);
+ pmbus_update_ts(client, PMBUS_OP_WRITE);
return rv;
}
@@ -520,7 +521,7 @@ static int pmbus_read_block_data(struct i2c_client *client, int page, u8 reg,
pmbus_wait(client);
rv = i2c_smbus_read_block_data(client, reg, data_buf);
- pmbus_update_ts(client, false);
+ pmbus_update_ts(client, 0);
return rv;
}
@@ -2524,7 +2525,7 @@ static int pmbus_read_coefficients(struct i2c_client *client,
rv = i2c_smbus_xfer(client->adapter, client->addr, client->flags,
I2C_SMBUS_WRITE, PMBUS_COEFFICIENTS,
I2C_SMBUS_BLOCK_PROC_CALL, &data);
- pmbus_update_ts(client, true);
+ pmbus_update_ts(client, PMBUS_OP_WRITE);
if (rv < 0)
return rv;
@@ -2728,7 +2729,7 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
if (!(data->flags & PMBUS_NO_CAPABILITY)) {
pmbus_wait(client);
ret = i2c_smbus_read_byte_data(client, PMBUS_CAPABILITY);
- pmbus_update_ts(client, false);
+ pmbus_update_ts(client, 0);
if (ret >= 0 && (ret & PB_CAPABILITY_ERROR_CHECK)) {
if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_PEC))
@@ -2744,13 +2745,13 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
data->read_status = pmbus_read_status_word;
pmbus_wait(client);
ret = i2c_smbus_read_word_data(client, PMBUS_STATUS_WORD);
- pmbus_update_ts(client, false);
+ pmbus_update_ts(client, 0);
if (ret < 0 || ret == 0xffff) {
data->read_status = pmbus_read_status_byte;
pmbus_wait(client);
ret = i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE);
- pmbus_update_ts(client, false);
+ pmbus_update_ts(client, 0);
if (ret < 0 || ret == 0xff) {
dev_err(dev, "PMBus status register not found\n");
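
The rework above collapses the separate access/write timestamps into one absolute "earliest next access" deadline, taking the largest delay the operation type requires. A user-space sketch with plain microsecond integers standing in for ktime_t (the delay values are made up):

#include <stdio.h>

#define OP_WRITE	(1 << 0)
#define OP_PAGE_CHANGE	(1 << 1)

/* Stand-ins for the pmbus_driver_info delays, in microseconds */
static const int access_delay = 0, write_delay = 100, page_change_delay = 50;

static long long next_backoff;	/* absolute deadline, like next_access_backoff */

static int max_int(int a, int b) { return a > b ? a : b; }

/* Mirrors pmbus_update_ts(): pick the largest delay the op type requires */
static void update_ts(long long now_us, int op)
{
	int delay = access_delay;

	if (op & OP_WRITE)
		delay = max_int(delay, write_delay);
	if (op & OP_PAGE_CHANGE)
		delay = max_int(delay, page_change_delay);

	if (delay > 0)
		next_backoff = now_us + delay;
}

/* Mirrors pmbus_wait(): sleep only for the remaining part of the deadline */
static long long sleep_needed(long long now_us)
{
	long long d = next_backoff - now_us;

	return d > 0 ? d : 0;
}

int main(void)
{
	update_ts(1000, OP_WRITE | OP_PAGE_CHANGE);
	printf("sleep %lldus\n", sleep_needed(1030));	/* 70: write delay wins */
	printf("sleep %lldus\n", sleep_needed(1200));	/* 0: deadline passed */
	return 0;
}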
diff --git a/drivers/hwmon/pmbus/tda38640.c b/drivers/hwmon/pmbus/tda38640.c
index 07fe58c24485..d902d39f49f4 100644
--- a/drivers/hwmon/pmbus/tda38640.c
+++ b/drivers/hwmon/pmbus/tda38640.c
@@ -15,7 +15,7 @@
#include "pmbus.h"
static const struct regulator_desc __maybe_unused tda38640_reg_desc[] = {
- PMBUS_REGULATOR_ONE("vout"),
+ PMBUS_REGULATOR_ONE_NODE("vout"),
};
struct tda38640_data {
diff --git a/drivers/hwmon/pmbus/tps25990.c b/drivers/hwmon/pmbus/tps25990.c
index 0d2655e69549..c13edd7e1abf 100644
--- a/drivers/hwmon/pmbus/tps25990.c
+++ b/drivers/hwmon/pmbus/tps25990.c
@@ -333,7 +333,7 @@ static int tps25990_write_byte_data(struct i2c_client *client,
#if IS_ENABLED(CONFIG_SENSORS_TPS25990_REGULATOR)
static const struct regulator_desc tps25990_reg_desc[] = {
- PMBUS_REGULATOR_ONE("vout"),
+ PMBUS_REGULATOR_ONE_NODE("vout"),
};
#endif
diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index 9b0eadc81a2e..2bc8cccb01fd 100644
--- a/drivers/hwmon/pmbus/ucd9000.c
+++ b/drivers/hwmon/pmbus/ucd9000.c
@@ -212,8 +212,8 @@ static int ucd9000_gpio_get(struct gpio_chip *gc, unsigned int offset)
return !!(ret & UCD9000_GPIO_CONFIG_STATUS);
}
-static void ucd9000_gpio_set(struct gpio_chip *gc, unsigned int offset,
- int value)
+static int ucd9000_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
struct i2c_client *client = gpiochip_get_data(gc);
int ret;
@@ -222,17 +222,17 @@ static void ucd9000_gpio_set(struct gpio_chip *gc, unsigned int offset,
if (ret < 0) {
dev_dbg(&client->dev, "failed to read GPIO %d config: %d\n",
offset, ret);
- return;
+ return ret;
}
if (value) {
if (ret & UCD9000_GPIO_CONFIG_STATUS)
- return;
+ return 0;
ret |= UCD9000_GPIO_CONFIG_STATUS;
} else {
if (!(ret & UCD9000_GPIO_CONFIG_STATUS))
- return;
+ return 0;
ret &= ~UCD9000_GPIO_CONFIG_STATUS;
}
@@ -244,7 +244,7 @@ static void ucd9000_gpio_set(struct gpio_chip *gc, unsigned int offset,
if (ret < 0) {
dev_dbg(&client->dev, "Failed to write GPIO %d config: %d\n",
offset, ret);
- return;
+ return ret;
}
ret &= ~UCD9000_GPIO_CONFIG_ENABLE;
@@ -253,6 +253,8 @@ static void ucd9000_gpio_set(struct gpio_chip *gc, unsigned int offset,
if (ret < 0)
dev_dbg(&client->dev, "Failed to write GPIO %d config: %d\n",
offset, ret);
+
+ return ret;
}
static int ucd9000_gpio_get_direction(struct gpio_chip *gc,
@@ -362,7 +364,7 @@ static void ucd9000_probe_gpio(struct i2c_client *client,
data->gpio.direction_input = ucd9000_gpio_direction_input;
data->gpio.direction_output = ucd9000_gpio_direction_output;
data->gpio.get = ucd9000_gpio_get;
- data->gpio.set = ucd9000_gpio_set;
+ data->gpio.set_rv = ucd9000_gpio_set;
data->gpio.can_sleep = true;
data->gpio.base = -1;
data->gpio.parent = &client->dev;
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index d506a5e7e033..2df294793f6e 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -620,8 +620,8 @@ static int pwm_fan_probe(struct platform_device *pdev)
if (tach->irq == -EPROBE_DEFER)
return tach->irq;
if (tach->irq > 0) {
- ret = devm_request_irq(dev, tach->irq, pulse_handler, 0,
- pdev->name, tach);
+ ret = devm_request_irq(dev, tach->irq, pulse_handler,
+ IRQF_NO_THREAD, pdev->name, tach);
if (ret) {
dev_err(dev,
"Failed to request interrupt: %d\n",
diff --git a/drivers/hwmon/qnap-mcu-hwmon.c b/drivers/hwmon/qnap-mcu-hwmon.c
index 29057514739c..e86e64c4d391 100644
--- a/drivers/hwmon/qnap-mcu-hwmon.c
+++ b/drivers/hwmon/qnap-mcu-hwmon.c
@@ -6,7 +6,6 @@
* Copyright (C) 2024 Heiko Stuebner <heiko@sntech.de>
*/
-#include <linux/fwnode.h>
#include <linux/hwmon.h>
#include <linux/mfd/qnap-mcu.h>
#include <linux/module.h>
diff --git a/drivers/hwmon/spd5118.c b/drivers/hwmon/spd5118.c
index 358152868d96..5da44571b6a0 100644
--- a/drivers/hwmon/spd5118.c
+++ b/drivers/hwmon/spd5118.c
@@ -66,6 +66,9 @@ static const unsigned short normal_i2c[] = {
#define SPD5118_EEPROM_BASE 0x80
#define SPD5118_EEPROM_SIZE (SPD5118_PAGE_SIZE * SPD5118_NUM_PAGES)
+#define PAGE_ADDR0(page) (((page) & BIT(0)) << 6)
+#define PAGE_ADDR1_4(page) (((page) & GENMASK(4, 1)) >> 1)
+
/* Temperature unit in millicelsius */
#define SPD5118_TEMP_UNIT (MILLIDEGREE_PER_DEGREE / 4)
/* Representable temperature range in millicelsius */
@@ -75,6 +78,7 @@ static const unsigned short normal_i2c[] = {
struct spd5118_data {
struct regmap *regmap;
struct mutex nvmem_lock;
+ bool is_16bit;
};
/* hwmon */
@@ -305,51 +309,6 @@ static bool spd5118_vendor_valid(u8 bank, u8 id)
return id && id != 0x7f;
}
-/* Return 0 if detection is successful, -ENODEV otherwise */
-static int spd5118_detect(struct i2c_client *client, struct i2c_board_info *info)
-{
- struct i2c_adapter *adapter = client->adapter;
- int regval;
-
- if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
- I2C_FUNC_SMBUS_WORD_DATA))
- return -ENODEV;
-
- regval = i2c_smbus_read_word_swapped(client, SPD5118_REG_TYPE);
- if (regval != 0x5118)
- return -ENODEV;
-
- regval = i2c_smbus_read_word_data(client, SPD5118_REG_VENDOR);
- if (regval < 0 || !spd5118_vendor_valid(regval & 0xff, regval >> 8))
- return -ENODEV;
-
- regval = i2c_smbus_read_byte_data(client, SPD5118_REG_CAPABILITY);
- if (regval < 0)
- return -ENODEV;
- if (!(regval & SPD5118_CAP_TS_SUPPORT) || (regval & 0xfc))
- return -ENODEV;
-
- regval = i2c_smbus_read_byte_data(client, SPD5118_REG_TEMP_CLR);
- if (regval)
- return -ENODEV;
- regval = i2c_smbus_read_byte_data(client, SPD5118_REG_ERROR_CLR);
- if (regval)
- return -ENODEV;
-
- regval = i2c_smbus_read_byte_data(client, SPD5118_REG_REVISION);
- if (regval < 0 || (regval & 0xc1))
- return -ENODEV;
-
- regval = i2c_smbus_read_byte_data(client, SPD5118_REG_TEMP_CONFIG);
- if (regval < 0)
- return -ENODEV;
- if (regval & ~SPD5118_TS_DISABLE)
- return -ENODEV;
-
- strscpy(info->type, "spd5118", I2C_NAME_SIZE);
- return 0;
-}
-
static const struct hwmon_channel_info *spd5118_info[] = {
HWMON_CHANNEL_INFO(chip,
HWMON_C_REGISTER_TZ),
@@ -376,11 +335,12 @@ static const struct hwmon_chip_info spd5118_chip_info = {
/* nvmem */
-static ssize_t spd5118_nvmem_read_page(struct regmap *regmap, char *buf,
+static ssize_t spd5118_nvmem_read_page(struct spd5118_data *data, char *buf,
unsigned int offset, size_t count)
{
- int addr = (offset >> SPD5118_PAGE_SHIFT) * 0x100 + SPD5118_EEPROM_BASE;
- int err;
+ int page = offset >> SPD5118_PAGE_SHIFT;
+ struct regmap *regmap = data->regmap;
+ int err, addr;
offset &= SPD5118_PAGE_MASK;
@@ -388,6 +348,12 @@ static ssize_t spd5118_nvmem_read_page(struct regmap *regmap, char *buf,
if (offset + count > SPD5118_PAGE_SIZE)
count = SPD5118_PAGE_SIZE - offset;
+ if (data->is_16bit) {
+ addr = SPD5118_EEPROM_BASE | PAGE_ADDR0(page) |
+ (PAGE_ADDR1_4(page) << 8);
+ } else {
+ addr = page * 0x100 + SPD5118_EEPROM_BASE;
+ }
err = regmap_bulk_read(regmap, addr + offset, buf, count);
if (err)
return err;
@@ -410,7 +376,7 @@ static int spd5118_nvmem_read(void *priv, unsigned int off, void *val, size_t co
mutex_lock(&data->nvmem_lock);
while (count) {
- ret = spd5118_nvmem_read_page(data->regmap, buf, off, count);
+ ret = spd5118_nvmem_read_page(data, buf, off, count);
if (ret < 0) {
mutex_unlock(&data->nvmem_lock);
return ret;
@@ -483,7 +449,7 @@ static bool spd5118_volatile_reg(struct device *dev, unsigned int reg)
}
}
-static const struct regmap_range_cfg spd5118_regmap_range_cfg[] = {
+static const struct regmap_range_cfg spd5118_i2c_regmap_range_cfg[] = {
{
.selector_reg = SPD5118_REG_I2C_LEGACY_MODE,
.selector_mask = SPD5118_LEGACY_PAGE_MASK,
@@ -495,7 +461,7 @@ static const struct regmap_range_cfg spd5118_regmap_range_cfg[] = {
},
};
-static const struct regmap_config spd5118_regmap_config = {
+static const struct regmap_config spd5118_regmap8_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = 0x7ff,
@@ -503,89 +469,76 @@ static const struct regmap_config spd5118_regmap_config = {
.volatile_reg = spd5118_volatile_reg,
.cache_type = REGCACHE_MAPLE,
- .ranges = spd5118_regmap_range_cfg,
- .num_ranges = ARRAY_SIZE(spd5118_regmap_range_cfg),
+ .ranges = spd5118_i2c_regmap_range_cfg,
+ .num_ranges = ARRAY_SIZE(spd5118_i2c_regmap_range_cfg),
};
-static int spd5118_init(struct i2c_client *client)
-{
- struct i2c_adapter *adapter = client->adapter;
- int err, regval, mode;
-
- if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
- I2C_FUNC_SMBUS_WORD_DATA))
- return -ENODEV;
+static const struct regmap_config spd5118_regmap16_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .max_register = 0x7ff,
+ .writeable_reg = spd5118_writeable_reg,
+ .volatile_reg = spd5118_volatile_reg,
+ .cache_type = REGCACHE_MAPLE,
+};
- regval = i2c_smbus_read_word_swapped(client, SPD5118_REG_TYPE);
- if (regval < 0 || (regval && regval != 0x5118))
- return -ENODEV;
+static int spd5118_suspend(struct device *dev)
+{
+ struct spd5118_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
+ u32 regval;
+ int err;
/*
- * If the device type registers return 0, it is possible that the chip
- * has a non-zero page selected and takes the specification literally,
- * i.e. disables access to volatile registers besides the page register
- * if the page is not 0. Try to identify such chips.
+ * Make sure the configuration register in the regmap cache is current
+ * before bypassing it.
*/
- if (!regval) {
- /* Vendor ID registers must also be 0 */
- regval = i2c_smbus_read_word_data(client, SPD5118_REG_VENDOR);
- if (regval)
- return -ENODEV;
-
- /* The selected page in MR11 must not be 0 */
- mode = i2c_smbus_read_byte_data(client, SPD5118_REG_I2C_LEGACY_MODE);
- if (mode < 0 || (mode & ~SPD5118_LEGACY_MODE_MASK) ||
- !(mode & SPD5118_LEGACY_PAGE_MASK))
- return -ENODEV;
+ err = regmap_read(regmap, SPD5118_REG_TEMP_CONFIG, &regval);
+ if (err < 0)
+ return err;
- err = i2c_smbus_write_byte_data(client, SPD5118_REG_I2C_LEGACY_MODE,
- mode & SPD5118_LEGACY_MODE_ADDR);
- if (err)
- return -ENODEV;
+ regcache_cache_bypass(regmap, true);
+ regmap_update_bits(regmap, SPD5118_REG_TEMP_CONFIG, SPD5118_TS_DISABLE,
+ SPD5118_TS_DISABLE);
+ regcache_cache_bypass(regmap, false);
- /*
- * If the device type registers are still bad after selecting
- * page 0, this is not a SPD5118 device. Restore original
- * legacy mode register value and abort.
- */
- regval = i2c_smbus_read_word_swapped(client, SPD5118_REG_TYPE);
- if (regval != 0x5118) {
- i2c_smbus_write_byte_data(client, SPD5118_REG_I2C_LEGACY_MODE, mode);
- return -ENODEV;
- }
- }
+ regcache_cache_only(regmap, true);
+ regcache_mark_dirty(regmap);
- /* We are reasonably sure that this is really a SPD5118 hub controller */
return 0;
}
-static int spd5118_probe(struct i2c_client *client)
+static int spd5118_resume(struct device *dev)
{
- struct device *dev = &client->dev;
- unsigned int regval, revision, vendor, bank;
+ struct spd5118_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
+
+ regcache_cache_only(regmap, false);
+ return regcache_sync(regmap);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(spd5118_pm_ops, spd5118_suspend, spd5118_resume);
+
+static int spd5118_common_probe(struct device *dev, struct regmap *regmap,
+ bool is_16bit)
+{
+ unsigned int capability, revision, vendor, bank;
struct spd5118_data *data;
struct device *hwmon_dev;
- struct regmap *regmap;
int err;
- err = spd5118_init(client);
- if (err)
- return err;
-
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- regmap = devm_regmap_init_i2c(client, &spd5118_regmap_config);
- if (IS_ERR(regmap))
- return dev_err_probe(dev, PTR_ERR(regmap), "regmap init failed\n");
-
- err = regmap_read(regmap, SPD5118_REG_CAPABILITY, &regval);
+ err = regmap_read(regmap, SPD5118_REG_CAPABILITY, &capability);
if (err)
return err;
- if (!(regval & SPD5118_CAP_TS_SUPPORT))
+ if (!(capability & SPD5118_CAP_TS_SUPPORT))
return -ENODEV;
+ data->is_16bit = is_16bit;
+
err = regmap_read(regmap, SPD5118_REG_REVISION, &revision);
if (err)
return err;
@@ -627,48 +580,176 @@ static int spd5118_probe(struct i2c_client *client)
return 0;
}
-static int spd5118_suspend(struct device *dev)
+/* I2C */
+
+/* Return 0 if detection is successful, -ENODEV otherwise */
+static int spd5118_detect(struct i2c_client *client, struct i2c_board_info *info)
{
- struct spd5118_data *data = dev_get_drvdata(dev);
- struct regmap *regmap = data->regmap;
- u32 regval;
- int err;
+ struct i2c_adapter *adapter = client->adapter;
+ int regval;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ regval = i2c_smbus_read_word_swapped(client, SPD5118_REG_TYPE);
+ if (regval != 0x5118)
+ return -ENODEV;
+
+ regval = i2c_smbus_read_word_data(client, SPD5118_REG_VENDOR);
+ if (regval < 0 || !spd5118_vendor_valid(regval & 0xff, regval >> 8))
+ return -ENODEV;
+
+ regval = i2c_smbus_read_byte_data(client, SPD5118_REG_CAPABILITY);
+ if (regval < 0)
+ return -ENODEV;
+ if (!(regval & SPD5118_CAP_TS_SUPPORT) || (regval & 0xfc))
+ return -ENODEV;
+
+ regval = i2c_smbus_read_byte_data(client, SPD5118_REG_TEMP_CLR);
+ if (regval)
+ return -ENODEV;
+ regval = i2c_smbus_read_byte_data(client, SPD5118_REG_ERROR_CLR);
+ if (regval)
+ return -ENODEV;
+
+ regval = i2c_smbus_read_byte_data(client, SPD5118_REG_REVISION);
+ if (regval < 0 || (regval & 0xc1))
+ return -ENODEV;
+
+ regval = i2c_smbus_read_byte_data(client, SPD5118_REG_TEMP_CONFIG);
+ if (regval < 0)
+ return -ENODEV;
+ if (regval & ~SPD5118_TS_DISABLE)
+ return -ENODEV;
+
+ strscpy(info->type, "spd5118", I2C_NAME_SIZE);
+ return 0;
+}
+
+static int spd5118_i2c_init(struct i2c_client *client)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ int err, regval, mode;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ regval = i2c_smbus_read_word_swapped(client, SPD5118_REG_TYPE);
+ if (regval < 0 || (regval && regval != 0x5118))
+ return -ENODEV;
/*
- * Make sure the configuration register in the regmap cache is current
- * before bypassing it.
+ * If the device type registers return 0, it is possible that the chip
+ * has a non-zero page selected and takes the specification literally,
+ * i.e. disables access to volatile registers besides the page register
+ * if the page is not 0. The Renesas/ITD SPD5118 Hub Controller is known
+ * to show this behavior. Try to identify such chips.
*/
- err = regmap_read(regmap, SPD5118_REG_TEMP_CONFIG, &regval);
- if (err < 0)
- return err;
+ if (!regval) {
+ /* Vendor ID registers must also be 0 */
+ regval = i2c_smbus_read_word_data(client, SPD5118_REG_VENDOR);
+ if (regval)
+ return -ENODEV;
- regcache_cache_bypass(regmap, true);
- regmap_update_bits(regmap, SPD5118_REG_TEMP_CONFIG, SPD5118_TS_DISABLE,
- SPD5118_TS_DISABLE);
- regcache_cache_bypass(regmap, false);
+ /* The selected page in MR11 must not be 0 */
+ mode = i2c_smbus_read_byte_data(client, SPD5118_REG_I2C_LEGACY_MODE);
+ if (mode < 0 || (mode & ~SPD5118_LEGACY_MODE_MASK) ||
+ !(mode & SPD5118_LEGACY_PAGE_MASK))
+ return -ENODEV;
- regcache_cache_only(regmap, true);
- regcache_mark_dirty(regmap);
+ err = i2c_smbus_write_byte_data(client, SPD5118_REG_I2C_LEGACY_MODE,
+ mode & SPD5118_LEGACY_MODE_ADDR);
+ if (err)
+ return -ENODEV;
+ /*
+ * If the device type registers are still bad after selecting
+ * page 0, this is not a SPD5118 device. Restore original
+ * legacy mode register value and abort.
+ */
+ regval = i2c_smbus_read_word_swapped(client, SPD5118_REG_TYPE);
+ if (regval != 0x5118) {
+ i2c_smbus_write_byte_data(client, SPD5118_REG_I2C_LEGACY_MODE, mode);
+ return -ENODEV;
+ }
+ }
+
+ /* We are reasonably sure that this is really a SPD5118 hub controller */
return 0;
}
-static int spd5118_resume(struct device *dev)
+/*
+ * 16-bit addressing note:
+ *
+ * If I2C_FUNC_I2C is not supported by an I2C adapter driver, regmap uses
+ * SMBus operations as alternative. To simulate a read operation with a 16-bit
+ * address, it writes the address using i2c_smbus_write_byte_data(), followed
+ * by one or more calls to i2c_smbus_read_byte() to read the data.
+ * Per spd5118 standard, a read operation after writing the address must start
+ * with <Sr> (Repeat Start). However, a SMBus read byte operation starts with
+ * <S> (Start). This resets the register address in the spd5118 chip. As a result,
+ * i2c_smbus_read_byte() always returns data from register address 0x00.
+ *
+ * A working alternative to access chips with 16-bit register addresses in the
+ * absence of I2C_FUNC_I2C support is not known.
+ *
+ * For this reason, 16-bit addressing can only be supported with I2C if the
+ * adapter supports I2C_FUNC_I2C.
+ *
+ * For I2C, the addressing mode selected by the BIOS must not be changed.
+ * Experiments show that at least some PC BIOS versions will not change the
+ * addressing mode on a soft reboot and end up in setup, claiming that some
+ * configuration change happened. This will happen again after a power cycle,
+ * which does reset the addressing mode. To prevent this from happening,
+ * detect if 16-bit addressing is enabled and always use the currently
+ * configured addressing mode.
+ */
+
+static int spd5118_i2c_probe(struct i2c_client *client)
{
- struct spd5118_data *data = dev_get_drvdata(dev);
- struct regmap *regmap = data->regmap;
+ const struct regmap_config *config;
+ struct device *dev = &client->dev;
+ struct regmap *regmap;
+ int err, mode;
+ bool is_16bit;
- regcache_cache_only(regmap, false);
- return regcache_sync(regmap);
-}
+ err = spd5118_i2c_init(client);
+ if (err)
+ return err;
-static DEFINE_SIMPLE_DEV_PM_OPS(spd5118_pm_ops, spd5118_suspend, spd5118_resume);
+ mode = i2c_smbus_read_byte_data(client, SPD5118_REG_I2C_LEGACY_MODE);
+ if (mode < 0)
+ return mode;
+
+ is_16bit = mode & SPD5118_LEGACY_MODE_ADDR;
+ if (is_16bit) {
+ /*
+ * See 16-bit addressing note above explaining why it is
+ * necessary to check for I2C_FUNC_I2C support here.
+ */
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(dev, "Adapter does not support 16-bit register addresses\n");
+ return -ENODEV;
+ }
+ config = &spd5118_regmap16_config;
+ } else {
+ config = &spd5118_regmap8_config;
+ }
+
+ regmap = devm_regmap_init_i2c(client, config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap), "regmap init failed\n");
+
+ return spd5118_common_probe(dev, regmap, is_16bit);
+}
-static const struct i2c_device_id spd5118_id[] = {
+static const struct i2c_device_id spd5118_i2c_id[] = {
{ "spd5118" },
{ }
};
-MODULE_DEVICE_TABLE(i2c, spd5118_id);
+MODULE_DEVICE_TABLE(i2c, spd5118_i2c_id);
static const struct of_device_id spd5118_of_ids[] = {
{ .compatible = "jedec,spd5118", },
@@ -676,20 +757,20 @@ static const struct of_device_id spd5118_of_ids[] = {
};
MODULE_DEVICE_TABLE(of, spd5118_of_ids);
-static struct i2c_driver spd5118_driver = {
+static struct i2c_driver spd5118_i2c_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "spd5118",
.of_match_table = spd5118_of_ids,
.pm = pm_sleep_ptr(&spd5118_pm_ops),
},
- .probe = spd5118_probe,
- .id_table = spd5118_id,
+ .probe = spd5118_i2c_probe,
+ .id_table = spd5118_i2c_id,
.detect = IS_ENABLED(CONFIG_SENSORS_SPD5118_DETECT) ? spd5118_detect : NULL,
.address_list = IS_ENABLED(CONFIG_SENSORS_SPD5118_DETECT) ? normal_i2c : NULL,
};
-module_i2c_driver(spd5118_driver);
+module_i2c_driver(spd5118_i2c_driver);
MODULE_AUTHOR("René Rebe <rene@exactcode.de>");
MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 8af44a33055f..a02daa496c9c 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -16,6 +16,7 @@
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <linux/of.h>
#define DRIVER_NAME "tmp102"
@@ -204,6 +205,10 @@ static int tmp102_probe(struct i2c_client *client)
return -ENODEV;
}
+ err = devm_regulator_get_enable_optional(dev, "vcc");
+ if (err < 0 && err != -ENODEV)
+ return dev_err_probe(dev, err, "Failed to enable regulator\n");
+
tmp102 = devm_kzalloc(dev, sizeof(*tmp102), GFP_KERNEL);
if (!tmp102)
return -ENOMEM;
diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c
index 2cdbd5f107a2..11c5d80428cd 100644
--- a/drivers/hwmon/xgene-hwmon.c
+++ b/drivers/hwmon/xgene-hwmon.c
@@ -103,8 +103,6 @@ struct xgene_hwmon_dev {
struct device *hwmon_dev;
bool temp_critical_alarm;
- phys_addr_t comm_base_addr;
- void *pcc_comm_addr;
unsigned int usecs_lat;
};
@@ -125,7 +123,8 @@ static u16 xgene_word_tst_and_clr(u16 *addr, u16 mask)
static int xgene_hwmon_pcc_rd(struct xgene_hwmon_dev *ctx, u32 *msg)
{
- struct acpi_pcct_shared_memory *generic_comm_base = ctx->pcc_comm_addr;
+ struct acpi_pcct_shared_memory __iomem *generic_comm_base =
+ ctx->pcc_chan->shmem;
u32 *ptr = (void *)(generic_comm_base + 1);
int rc, i;
u16 val;
@@ -523,7 +522,8 @@ static void xgene_hwmon_rx_cb(struct mbox_client *cl, void *msg)
static void xgene_hwmon_pcc_rx_cb(struct mbox_client *cl, void *msg)
{
struct xgene_hwmon_dev *ctx = to_xgene_hwmon_dev(cl);
- struct acpi_pcct_shared_memory *generic_comm_base = ctx->pcc_comm_addr;
+ struct acpi_pcct_shared_memory __iomem *generic_comm_base =
+ ctx->pcc_chan->shmem;
struct slimpro_resp_msg amsg;
/*
@@ -649,7 +649,6 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
} else {
struct pcc_mbox_chan *pcc_chan;
const struct acpi_device_id *acpi_id;
- int version;
acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
&pdev->dev);
@@ -658,8 +657,6 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
goto out_mbox_free;
}
- version = (int)acpi_id->driver_data;
-
if (device_property_read_u32(&pdev->dev, "pcc-channel",
&ctx->mbox_idx)) {
dev_err(&pdev->dev, "no pcc-channel property\n");
@@ -686,34 +683,6 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
}
/*
- * This is the shared communication region
- * for the OS and Platform to communicate over.
- */
- ctx->comm_base_addr = pcc_chan->shmem_base_addr;
- if (ctx->comm_base_addr) {
- if (version == XGENE_HWMON_V2)
- ctx->pcc_comm_addr = (void __force *)devm_ioremap(&pdev->dev,
- ctx->comm_base_addr,
- pcc_chan->shmem_size);
- else
- ctx->pcc_comm_addr = devm_memremap(&pdev->dev,
- ctx->comm_base_addr,
- pcc_chan->shmem_size,
- MEMREMAP_WB);
- } else {
- dev_err(&pdev->dev, "Failed to get PCC comm region\n");
- rc = -ENODEV;
- goto out;
- }
-
- if (IS_ERR_OR_NULL(ctx->pcc_comm_addr)) {
- dev_err(&pdev->dev,
- "Failed to ioremap PCC comm region\n");
- rc = -ENOMEM;
- goto out;
- }
-
- /*
* pcc_chan->latency is just a Nominal value. In reality
* the remote processor could be much slower to reply.
* So add an arbitrary amount of wait on top of Nominal.
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 1008858f78e2..c066a4da7c14 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -105,6 +105,8 @@
#define PKT_XBE2_FW_5_EARLY 3
#define PKT_XBE2_FW_5_11 4
+#define FLAG_DELAY_INIT BIT(0)
+
static bool dpad_to_buttons;
module_param(dpad_to_buttons, bool, S_IRUGO);
MODULE_PARM_DESC(dpad_to_buttons, "Map D-PAD to buttons rather than axes for unknown pads");
@@ -127,6 +129,7 @@ static const struct xpad_device {
char *name;
u8 mapping;
u8 xtype;
+ u8 flags;
} xpad_device[] = {
/* Please keep this list sorted by vendor and product ID. */
{ 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 },
@@ -416,6 +419,7 @@ static const struct xpad_device {
{ 0x3285, 0x0663, "Nacon Evol-X", 0, XTYPE_XBOXONE },
{ 0x3537, 0x1004, "GameSir T4 Kaleid", 0, XTYPE_XBOX360 },
{ 0x3537, 0x1010, "GameSir G7 SE", 0, XTYPE_XBOXONE },
+ { 0x366c, 0x0005, "ByoWave Proteus Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE, FLAG_DELAY_INIT },
{ 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX },
{ 0x413d, 0x2104, "Black Shark Green Ghost Gamepad", 0, XTYPE_XBOX360 },
{ 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
@@ -571,6 +575,7 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOXONE_VENDOR(0x3285), /* Nacon Evol-X */
XPAD_XBOX360_VENDOR(0x3537), /* GameSir Controllers */
XPAD_XBOXONE_VENDOR(0x3537), /* GameSir Controllers */
+ XPAD_XBOXONE_VENDOR(0x366c), /* ByoWave controllers */
XPAD_XBOX360_VENDOR(0x413d), /* Black Shark Green Ghost Controller */
{ }
};
@@ -599,6 +604,7 @@ struct xboxone_init_packet {
* - https://github.com/medusalix/xone/blob/master/bus/protocol.c
*/
#define GIP_CMD_ACK 0x01
+#define GIP_CMD_ANNOUNCE 0x02
#define GIP_CMD_IDENTIFY 0x04
#define GIP_CMD_POWER 0x05
#define GIP_CMD_AUTHENTICATE 0x06
@@ -673,20 +679,19 @@ static const u8 xboxone_hori_ack_id[] = {
};
/*
- * This packet is required for most (all?) of the PDP pads to start
- * sending input reports. These pads include: (0x0e6f:0x02ab),
- * (0x0e6f:0x02a4), (0x0e6f:0x02a6).
+ * This packet is sent by default on Windows, and is required for some pads
+ * to start sending input reports, including most (all?) of the PDP pads:
+ * (0x0e6f:0x02ab), (0x0e6f:0x02a4), (0x0e6f:0x02a6).
*/
-static const u8 xboxone_pdp_led_on[] = {
- GIP_CMD_LED, GIP_OPT_INTERNAL, GIP_SEQ0, GIP_PL_LEN(3), 0x00, GIP_LED_ON, 0x14
-};
+static const u8 xboxone_led_on[] = {
+ GIP_CMD_LED, GIP_OPT_INTERNAL, GIP_SEQ0, GIP_PL_LEN(3), 0x00, GIP_LED_ON, 0x14
+};
/*
* This packet is required for most (all?) of the PDP pads to start
* sending input reports. These pads include: (0x0e6f:0x02ab),
* (0x0e6f:0x02a4), (0x0e6f:0x02a6).
*/
-static const u8 xboxone_pdp_auth[] = {
+static const u8 xboxone_auth_done[] = {
GIP_CMD_AUTHENTICATE, GIP_OPT_INTERNAL, GIP_SEQ0, GIP_PL_LEN(2), 0x01, 0x00
};
@@ -723,12 +728,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
XBOXONE_INIT_PKT(0x045e, 0x02ea, xboxone_s_init),
XBOXONE_INIT_PKT(0x045e, 0x0b00, xboxone_s_init),
XBOXONE_INIT_PKT(0x045e, 0x0b00, extra_input_packet_init),
- XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_led_on),
- XBOXONE_INIT_PKT(0x0f0d, 0x01b2, xboxone_pdp_led_on),
- XBOXONE_INIT_PKT(0x20d6, 0xa01a, xboxone_pdp_led_on),
- XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_auth),
- XBOXONE_INIT_PKT(0x0f0d, 0x01b2, xboxone_pdp_auth),
- XBOXONE_INIT_PKT(0x20d6, 0xa01a, xboxone_pdp_auth),
+ XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_led_on),
+ XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_auth_done),
XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
@@ -788,10 +789,13 @@ struct usb_xpad {
const char *name; /* name of the device */
struct work_struct work; /* init/remove device from callback */
time64_t mode_btn_down_ts;
+ bool delay_init; /* init packets should be delayed */
+ bool delayed_init_done;
};
static int xpad_init_input(struct usb_xpad *xpad);
static void xpad_deinit_input(struct usb_xpad *xpad);
+static int xpad_start_input(struct usb_xpad *xpad);
static void xpadone_ack_mode_report(struct usb_xpad *xpad, u8 seq_num);
static void xpad360w_poweroff_controller(struct usb_xpad *xpad);
@@ -1076,6 +1080,17 @@ static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char
do_sync = true;
}
+ } else if (data[0] == GIP_CMD_ANNOUNCE) {
+ int error;
+
+ if (xpad->delay_init && !xpad->delayed_init_done) {
+ xpad->delayed_init_done = true;
+ error = xpad_start_input(xpad);
+ if (error)
+ dev_warn(&xpad->dev->dev,
+ "unable to start delayed input: %d\n",
+ error);
+ }
} else if (data[0] == GIP_CMD_INPUT) { /* The main valid packet type for inputs */
/* menu/view buttons */
input_report_key(dev, BTN_START, data[4] & BIT(2));
@@ -1254,6 +1269,14 @@ static bool xpad_prepare_next_init_packet(struct usb_xpad *xpad)
if (xpad->xtype != XTYPE_XBOXONE)
return false;
+ /*
+ * Some dongles will discard init packets if they're sent before the
+ * controller connects. In these cases, we need to wait until we get
+ * an announce packet from them to send the init packet sequence.
+ */
+ if (xpad->delay_init && !xpad->delayed_init_done)
+ return false;
+
/* Perform initialization sequence for Xbox One pads that require it */
while (xpad->init_seq < ARRAY_SIZE(xboxone_init_packets)) {
init_packet = &xboxone_init_packets[xpad->init_seq++];
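In effect, the announce packet gates the whole init sequence. A condensed view of the flow wired up above, using the driver's own names (slightly simplified from the interrupt handler):

/* On receipt of GIP_CMD_ANNOUNCE, release the held-back init sequence. */
if (data[0] == GIP_CMD_ANNOUNCE && xpad->delay_init && !xpad->delayed_init_done) {
	xpad->delayed_init_done = true;	/* the dongle will accept packets now */
	xpad_start_input(xpad);		/* replays xboxone_init_packets[] */
}
/*
 * Until the flag is set, xpad_prepare_next_init_packet() returns false,
 * so no init packet is sent before the controller announces itself.
 */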
@@ -2069,6 +2092,9 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
xpad->mapping = xpad_device[i].mapping;
xpad->xtype = xpad_device[i].xtype;
xpad->name = xpad_device[i].name;
+ if (xpad_device[i].flags & FLAG_DELAY_INIT)
+ xpad->delay_init = true;
+
xpad->packet_type = PKT_XB;
INIT_WORK(&xpad->work, xpad_presence_work);
@@ -2268,6 +2294,7 @@ static int xpad_resume(struct usb_interface *intf)
struct usb_xpad *xpad = usb_get_intfdata(intf);
struct input_dev *input = xpad->dev;
+ xpad->delayed_init_done = false;
if (xpad->xtype == XTYPE_XBOX360W)
return xpad360w_start_input(xpad);
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index adf0f311996c..3ff2fcf05ad5 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -37,7 +37,7 @@ static int atkbd_set = 2;
module_param_named(set, atkbd_set, int, 0);
MODULE_PARM_DESC(set, "Select keyboard code set (2 = default, 3 = PS/2 native)");
-#if defined(__i386__) || defined(__x86_64__) || defined(__hppa__)
+#if defined(__i386__) || defined(__x86_64__) || defined(__hppa__) || defined(__loongarch__)
static bool atkbd_reset;
#else
static bool atkbd_reset = true;
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 5c39a217b94c..f9db86da0818 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -449,6 +449,8 @@ static enum hrtimer_restart gpio_keys_irq_timer(struct hrtimer *t)
release_timer);
struct input_dev *input = bdata->input;
+ guard(spinlock_irqsave)(&bdata->lock);
+
if (bdata->key_pressed) {
input_report_key(input, *bdata->code, 0);
input_sync(input);
@@ -486,7 +488,7 @@ static irqreturn_t gpio_keys_irq_isr(int irq, void *dev_id)
if (bdata->release_delay)
hrtimer_start(&bdata->release_timer,
ms_to_ktime(bdata->release_delay),
- HRTIMER_MODE_REL_HARD);
+ HRTIMER_MODE_REL);
out:
return IRQ_HANDLED;
}
@@ -628,7 +630,7 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
bdata->release_delay = button->debounce_interval;
hrtimer_setup(&bdata->release_timer, gpio_keys_irq_timer,
- CLOCK_REALTIME, HRTIMER_MODE_REL_HARD);
+ CLOCK_REALTIME, HRTIMER_MODE_REL);
isr = gpio_keys_irq_isr;
irqflags = 0;
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index e46473cb817c..e50a6fea9a60 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -104,6 +104,16 @@ static void disable_row_irqs(struct matrix_keypad *keypad)
disable_irq_nosync(keypad->row_irqs[i]);
}
+static uint32_t read_row_state(struct matrix_keypad *keypad)
+{
+ int row;
+ u32 row_state = 0;
+
+ for (row = 0; row < keypad->num_row_gpios; row++)
+ row_state |= row_asserted(keypad, row) ? BIT(row) : 0;
+ return row_state;
+}
+
/*
* This gets the keys from keyboard and reports it to input subsystem
*/
@@ -115,6 +125,10 @@ static void matrix_keypad_scan(struct work_struct *work)
const unsigned short *keycodes = input_dev->keycode;
uint32_t new_state[MATRIX_MAX_COLS];
int row, col, code;
+ u32 init_row_state, new_row_state;
+
+ /* read initial row state to detect changes during the scan */
+ init_row_state = read_row_state(keypad);
/* de-activate all columns for scanning */
activate_all_cols(keypad, false);
@@ -129,9 +143,7 @@ static void matrix_keypad_scan(struct work_struct *work)
activate_col(keypad, col, true);
- for (row = 0; row < keypad->num_row_gpios; row++)
- new_state[col] |=
- row_asserted(keypad, row) ? BIT(row) : 0;
+ new_state[col] = read_row_state(keypad);
activate_col(keypad, col, false);
}
@@ -165,6 +177,18 @@ static void matrix_keypad_scan(struct work_struct *work)
keypad->scan_pending = false;
enable_row_irqs(keypad);
}
+
+ /* read the new row state and detect whether it changed */
+ new_row_state = read_row_state(keypad);
+ if (init_row_state != new_row_state) {
+ guard(spinlock_irq)(&keypad->lock);
+ if (unlikely(keypad->scan_pending || keypad->stopped))
+ return;
+ disable_row_irqs(keypad);
+ keypad->scan_pending = true;
+ schedule_delayed_work(&keypad->work,
+ msecs_to_jiffies(keypad->debounce_ms));
+ }
}
static irqreturn_t matrix_keypad_interrupt(int irq, void *id)
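The snapshot-and-recheck added above closes a race: row interrupts stay disabled for the whole scan, so a key edge that occurs between the initial read and the end of the scan would otherwise go unnoticed until some later, unrelated interrupt. A simplified sketch of the pattern, with hypothetical helpers (the real driver schedules delayed work rather than looping, and takes the spinlock around the re-arm):

/* Simplified sketch; kp, read_rows(), column_scan() and rearm() are hypothetical. */
static void scan_with_recheck(struct kp *kp)
{
	u32 before = read_rows(kp);	/* snapshot before driving columns */

	column_scan(kp);		/* row IRQs are disabled throughout */

	if (read_rows(kp) != before)
		rearm(kp);		/* activity mid-scan: schedule another pass */
	else
		enable_row_irqs(kp);	/* quiescent: wait for the next key IRQ */
}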
diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c
index bbf409dda89f..fe7398eeb828 100644
--- a/drivers/input/keyboard/snvs_pwrkey.c
+++ b/drivers/input/keyboard/snvs_pwrkey.c
@@ -27,6 +27,8 @@
#define SNVS_HPSR_BTN BIT(6)
#define SNVS_LPSR_SPO BIT(18)
#define SNVS_LPCR_DEP_EN BIT(5)
+#define SNVS_LPCR_BPT_SHIFT 16
+#define SNVS_LPCR_BPT_MASK (3 << SNVS_LPCR_BPT_SHIFT)
#define DEBOUNCE_TIME 30
#define REPEAT_INTERVAL 60
@@ -114,6 +116,8 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
struct device_node *np;
struct clk *clk;
int error;
+ unsigned int val;
+ unsigned int bpt;
u32 vid;
/* Get SNVS register Page */
@@ -148,6 +152,27 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
if (pdata->irq < 0)
return -EINVAL;
+ error = of_property_read_u32(np, "power-off-time-sec", &val);
+ if (!error) {
+ switch (val) {
+ case 0:
+ bpt = 0x3;
+ break;
+ case 5:
+ case 10:
+ case 15:
+ bpt = (val / 5) - 1;
+ break;
+ default:
+ dev_err(&pdev->dev,
+ "power-off-time-sec %d out of range\n", val);
+ return -EINVAL;
+ }
+
+ regmap_update_bits(pdata->snvs, SNVS_LPCR_REG, SNVS_LPCR_BPT_MASK,
+ bpt << SNVS_LPCR_BPT_SHIFT);
+ }
+
regmap_read(pdata->snvs, SNVS_HPVIDR1_REG, &vid);
pdata->minor_rev = vid & 0xff;
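As a quick cross-check of the encoding implemented by the switch above, a standalone sketch (not driver code) that reproduces the power-off-time-sec to BPT field mapping:

#include <stdio.h>

/* Mirrors the switch above: DT power-off-time-sec -> SNVS_LPCR BPT field. */
static int bpt_for(unsigned int sec)
{
	if (sec == 0)
		return 0x3;
	if (sec == 5 || sec == 10 || sec == 15)
		return (sec / 5) - 1;
	return -1;	/* any other value is rejected with -EINVAL */
}

int main(void)
{
	static const unsigned int secs[] = { 0, 5, 10, 15 };

	for (unsigned int i = 0; i < 4; i++)
		printf("%2us -> 0x%x\n", secs[i], bpt_for(secs[i]));
	return 0;	/* prints: 0s -> 0x3, 5s -> 0x0, 10s -> 0x1, 15s -> 0x2 */
}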
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index d9ee14b1f451..4581f1c53644 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -844,6 +844,12 @@ static int ims_pcu_flash_firmware(struct ims_pcu *pcu,
addr = be32_to_cpu(rec->addr) / 2;
len = be16_to_cpu(rec->len);
+ if (len > sizeof(pcu->cmd_buf) - 1 - sizeof(*fragment)) {
+ dev_err(pcu->dev,
+ "Invalid record length in firmware: %d\n", len);
+ return -EINVAL;
+ }
+
fragment = (void *)&pcu->cmd_buf[1];
put_unaligned_le32(addr, &fragment->addr);
fragment->len = len;
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index c06b62f87b9b..9c17dfa76703 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -2024,9 +2024,6 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
if (!iommu->dev)
return -ENODEV;
- /* Prevent binding other PCI device drivers to IOMMU devices */
- iommu->dev->match_driver = false;
-
/* ACPI _PRT won't have an IRQ for IOMMU */
iommu->dev->irq_managed = 1;
diff --git a/drivers/leds/.kunitconfig b/drivers/leds/.kunitconfig
new file mode 100644
index 000000000000..5180f77910a1
--- /dev/null
+++ b/drivers/leds/.kunitconfig
@@ -0,0 +1,4 @@
+CONFIG_KUNIT=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_KUNIT_TEST=y
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index a104cbb0a001..6e3dce7e35a4 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -55,6 +55,13 @@ config LEDS_BRIGHTNESS_HW_CHANGED
See Documentation/ABI/testing/sysfs-class-led for details.
+config LEDS_KUNIT_TEST
+ tristate "KUnit tests for LEDs"
+ depends on KUNIT && LEDS_CLASS
+ default KUNIT_ALL_TESTS
+ help
+ Say Y here to enable KUnit testing for the LEDs framework.
+
comment "LED drivers"
config LEDS_88PM860X
@@ -735,7 +742,7 @@ config LEDS_NS2
tristate "LED support for Network Space v2 GPIO LEDs"
depends on LEDS_CLASS
depends on MACH_KIRKWOOD || MACH_ARMADA_370 || COMPILE_TEST
- default y
+ default y if MACH_KIRKWOOD || MACH_ARMADA_370
help
This option enables support for the dual-GPIO LEDs found on the
following LaCie/Seagate boards:
@@ -750,7 +757,7 @@ config LEDS_NETXBIG
depends on LEDS_CLASS
depends on MACH_KIRKWOOD || COMPILE_TEST
depends on OF_GPIO
- default y
+ default MACH_KIRKWOOD
help
This option enables support for LEDs found on the LaCie 2Big
and 5Big Network v2 boards. The LEDs are wired to a CPLD and are
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 2f170d69dcbf..9a0333ec1a86 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_LEDS_CLASS) += led-class.o
obj-$(CONFIG_LEDS_CLASS_FLASH) += led-class-flash.o
obj-$(CONFIG_LEDS_CLASS_MULTICOLOR) += led-class-multicolor.o
obj-$(CONFIG_LEDS_TRIGGERS) += led-triggers.o
+obj-$(CONFIG_LEDS_KUNIT_TEST) += led-test.o
# LED Platform Drivers (keep this sorted, M-| sort)
obj-$(CONFIG_LEDS_88PM860X) += leds-88pm860x.o
diff --git a/drivers/leds/blink/leds-lgm-sso.c b/drivers/leds/blink/leds-lgm-sso.c
index effaaaf302b5..c9027f9c4bb7 100644
--- a/drivers/leds/blink/leds-lgm-sso.c
+++ b/drivers/leds/blink/leds-lgm-sso.c
@@ -450,7 +450,7 @@ static int sso_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(reg_val & BIT(offset));
}
-static void sso_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int sso_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct sso_led_priv *priv = gpiochip_get_data(chip);
@@ -458,6 +458,8 @@ static void sso_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
if (!priv->gpio.freq)
regmap_update_bits(priv->mmap, SSO_CON0, SSO_CON0_SWU,
SSO_CON0_SWU);
+
+ return 0;
}
static int sso_gpio_gc_init(struct device *dev, struct sso_led_priv *priv)
@@ -469,7 +471,7 @@ static int sso_gpio_gc_init(struct device *dev, struct sso_led_priv *priv)
gc->get_direction = sso_gpio_get_dir;
gc->direction_output = sso_gpio_dir_out;
gc->get = sso_gpio_get;
- gc->set = sso_gpio_set;
+ gc->set_rv = sso_gpio_set;
gc->label = "lgm-sso";
gc->base = -1;
diff --git a/drivers/leds/flash/Kconfig b/drivers/leds/flash/Kconfig
index f39f0bfe6eef..55ca663ca506 100644
--- a/drivers/leds/flash/Kconfig
+++ b/drivers/leds/flash/Kconfig
@@ -132,4 +132,15 @@ config LEDS_SY7802
This driver can be built as a module, it will be called "leds-sy7802".
+config LEDS_TPS6131X
+ tristate "LED support for TI TPS6131x flash LED driver"
+ depends on I2C && OF
+ depends on GPIOLIB
+ select REGMAP_I2C
+ help
+ This option enables support for Texas Instruments TPS61310/TPS61311
+ flash LED driver.
+
+ This driver can be built as a module, it will be called "leds-tps6131x".
+
endif # LEDS_CLASS_FLASH
diff --git a/drivers/leds/flash/Makefile b/drivers/leds/flash/Makefile
index 48860eeced79..712fb737a428 100644
--- a/drivers/leds/flash/Makefile
+++ b/drivers/leds/flash/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_LEDS_RT4505) += leds-rt4505.o
obj-$(CONFIG_LEDS_RT8515) += leds-rt8515.o
obj-$(CONFIG_LEDS_SGM3140) += leds-sgm3140.o
obj-$(CONFIG_LEDS_SY7802) += leds-sy7802.o
+obj-$(CONFIG_LEDS_TPS6131X) += leds-tps6131x.o
diff --git a/drivers/leds/flash/leds-tps6131x.c b/drivers/leds/flash/leds-tps6131x.c
new file mode 100644
index 000000000000..6f4d4fd55361
--- /dev/null
+++ b/drivers/leds/flash/leds-tps6131x.c
@@ -0,0 +1,815 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Texas Instruments TPS61310/TPS61311 flash LED driver with I2C interface
+ *
+ * Copyright 2025 Matthias Fend <matthias.fend@emfend.at>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/led-class-flash.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <media/v4l2-flash-led-class.h>
+
+#define TPS6131X_REG_0 0x00
+#define TPS6131X_REG_0_RESET BIT(7)
+#define TPS6131X_REG_0_DCLC13 GENMASK(5, 3)
+#define TPS6131X_REG_0_DCLC13_SHIFT 3
+#define TPS6131X_REG_0_DCLC2 GENMASK(2, 0)
+#define TPS6131X_REG_0_DCLC2_SHIFT 0
+
+#define TPS6131X_REG_1 0x01
+#define TPS6131X_REG_1_MODE GENMASK(7, 6)
+#define TPS6131X_REG_1_MODE_SHIFT 6
+#define TPS6131X_REG_1_FC2 GENMASK(5, 0)
+#define TPS6131X_REG_1_FC2_SHIFT 0
+
+#define TPS6131X_REG_2 0x02
+#define TPS6131X_REG_2_MODE GENMASK(7, 6)
+#define TPS6131X_REG_2_MODE_SHIFT 6
+#define TPS6131X_REG_2_ENVM BIT(5)
+#define TPS6131X_REG_2_FC13 GENMASK(4, 0)
+#define TPS6131X_REG_2_FC13_SHIFT 0
+
+#define TPS6131X_REG_3 0x03
+#define TPS6131X_REG_3_STIM GENMASK(7, 5)
+#define TPS6131X_REG_3_STIM_SHIFT 5
+#define TPS6131X_REG_3_HPFL BIT(4)
+#define TPS6131X_REG_3_SELSTIM_TO BIT(3)
+#define TPS6131X_REG_3_STT BIT(2)
+#define TPS6131X_REG_3_SFT BIT(1)
+#define TPS6131X_REG_3_TXMASK BIT(0)
+
+#define TPS6131X_REG_4 0x04
+#define TPS6131X_REG_4_PG BIT(7)
+#define TPS6131X_REG_4_HOTDIE_HI BIT(6)
+#define TPS6131X_REG_4_HOTDIE_LO BIT(5)
+#define TPS6131X_REG_4_ILIM BIT(4)
+#define TPS6131X_REG_4_INDC GENMASK(3, 0)
+#define TPS6131X_REG_4_INDC_SHIFT 0
+
+#define TPS6131X_REG_5 0x05
+#define TPS6131X_REG_5_SELFCAL BIT(7)
+#define TPS6131X_REG_5_ENPSM BIT(6)
+#define TPS6131X_REG_5_STSTRB1_DIR BIT(5)
+#define TPS6131X_REG_5_GPIO BIT(4)
+#define TPS6131X_REG_5_GPIOTYPE BIT(3)
+#define TPS6131X_REG_5_ENLED3 BIT(2)
+#define TPS6131X_REG_5_ENLED2 BIT(1)
+#define TPS6131X_REG_5_ENLED1 BIT(0)
+
+#define TPS6131X_REG_6 0x06
+#define TPS6131X_REG_6_ENTS BIT(7)
+#define TPS6131X_REG_6_LEDHOT BIT(6)
+#define TPS6131X_REG_6_LEDWARN BIT(5)
+#define TPS6131X_REG_6_LEDHDR BIT(4)
+#define TPS6131X_REG_6_OV GENMASK(3, 0)
+#define TPS6131X_REG_6_OV_SHIFT 0
+
+#define TPS6131X_REG_7 0x07
+#define TPS6131X_REG_7_ENBATMON BIT(7)
+#define TPS6131X_REG_7_BATDROOP GENMASK(6, 4)
+#define TPS6131X_REG_7_BATDROOP_SHIFT 4
+#define TPS6131X_REG_7_REVID GENMASK(2, 0)
+#define TPS6131X_REG_7_REVID_SHIFT 0
+
+#define TPS6131X_MAX_CHANNELS 3
+
+#define TPS6131X_FLASH_MAX_I_CHAN13_MA 400
+#define TPS6131X_FLASH_MAX_I_CHAN2_MA 800
+#define TPS6131X_FLASH_STEP_I_MA 25
+
+#define TPS6131X_TORCH_MAX_I_CHAN13_MA 175
+#define TPS6131X_TORCH_MAX_I_CHAN2_MA 175
+#define TPS6131X_TORCH_STEP_I_MA 25
+
+/* The torch watchdog timer must be refreshed within an interval of 13 seconds. */
+#define TPS6131X_TORCH_REFRESH_INTERVAL_JIFFIES msecs_to_jiffies(10000)
+
+#define UA_TO_MA(UA) ((UA) / 1000)
+
+enum tps6131x_mode {
+ TPS6131X_MODE_SHUTDOWN = 0x0,
+ TPS6131X_MODE_TORCH = 0x1,
+ TPS6131X_MODE_FLASH = 0x2,
+};
+
+struct tps6131x {
+ struct device *dev;
+ struct regmap *regmap;
+ struct gpio_desc *reset_gpio;
+ /*
+ * Registers 0, 1, 2, and 3 control parts of the controller that are not completely
+ * independent of each other. Since some operations require the registers to be written in
+ * a specific order to avoid unwanted side effects, they are synchronized with a lock.
+ */
+ struct mutex lock; /* Hardware access lock for registers 0, 1, 2 and 3 */
+ struct delayed_work torch_refresh_work;
+ bool valley_current_limit;
+ bool chan1_en;
+ bool chan2_en;
+ bool chan3_en;
+ struct fwnode_handle *led_node;
+ u32 max_flash_current_ma;
+ u32 step_flash_current_ma;
+ u32 max_torch_current_ma;
+ u32 step_torch_current_ma;
+ u32 max_timeout_us;
+ struct led_classdev_flash fled_cdev;
+ struct v4l2_flash *v4l2_flash;
+};
+
+static struct tps6131x *fled_cdev_to_tps6131x(struct led_classdev_flash *fled_cdev)
+{
+ return container_of(fled_cdev, struct tps6131x, fled_cdev);
+}
+
+/*
+ * Register contents after a power on/reset. These values cannot be changed.
+ */
+
+#define TPS6131X_DCLC2_50MA 2
+#define TPS6131X_DCLC13_25MA 1
+#define TPS6131X_FC2_400MA 16
+#define TPS6131X_FC13_200MA 8
+#define TPS6131X_STIM_0_579MS_1_37MS 6
+#define TPS6131X_SELSTIM_RANGE0 0
+#define TPS6131X_INDC_OFF 0
+#define TPS6131X_OV_4950MV 9
+#define TPS6131X_BATDROOP_150MV 4
+
+static const struct reg_default tps6131x_regmap_defaults[] = {
+ { TPS6131X_REG_0, (TPS6131X_DCLC13_25MA << TPS6131X_REG_0_DCLC13_SHIFT) |
+ (TPS6131X_DCLC2_50MA << TPS6131X_REG_0_DCLC2_SHIFT) },
+ { TPS6131X_REG_1, (TPS6131X_MODE_SHUTDOWN << TPS6131X_REG_1_MODE_SHIFT) |
+ (TPS6131X_FC2_400MA << TPS6131X_REG_1_FC2_SHIFT) },
+ { TPS6131X_REG_2, (TPS6131X_MODE_SHUTDOWN << TPS6131X_REG_2_MODE_SHIFT) |
+ (TPS6131X_FC13_200MA << TPS6131X_REG_2_FC13_SHIFT) },
+ { TPS6131X_REG_3, (TPS6131X_STIM_0_579MS_1_37MS << TPS6131X_REG_3_STIM_SHIFT) |
+ (TPS6131X_SELSTIM_RANGE0 << TPS6131X_REG_3_SELSTIM_TO) |
+ TPS6131X_REG_3_TXMASK },
+ { TPS6131X_REG_4, (TPS6131X_INDC_OFF << TPS6131X_REG_4_INDC_SHIFT) },
+ { TPS6131X_REG_5, TPS6131X_REG_5_ENPSM | TPS6131X_REG_5_STSTRB1_DIR |
+ TPS6131X_REG_5_GPIOTYPE | TPS6131X_REG_5_ENLED2 },
+ { TPS6131X_REG_6, (TPS6131X_OV_4950MV << TPS6131X_REG_6_OV_SHIFT) },
+ { TPS6131X_REG_7, (TPS6131X_BATDROOP_150MV << TPS6131X_REG_7_BATDROOP_SHIFT) },
+};
+
+/*
+ * These registers contain flags that are reset when read.
+ */
+static bool tps6131x_regmap_precious(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TPS6131X_REG_3:
+ case TPS6131X_REG_4:
+ case TPS6131X_REG_6:
+ return true;
+ default:
+ return false;
+ }
+}
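Marking the clear-on-read registers as precious keeps regmap's cache-sync and debugfs paths from reading them behind the driver's back. When the live flags are wanted, the driver bypasses the flat cache explicitly, as the strobe and fault getters further below do; in outline (names from this driver):

/* Deliberate, cache-bypassing read; this clears LEDHOT/LEDWARN in hardware. */
unsigned int reg6;
int ret = regmap_read_bypassed(tps6131x->regmap, TPS6131X_REG_6, &reg6);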
+
+static const struct regmap_config tps6131x_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = TPS6131X_REG_7,
+ .reg_defaults = tps6131x_regmap_defaults,
+ .num_reg_defaults = ARRAY_SIZE(tps6131x_regmap_defaults),
+ .cache_type = REGCACHE_FLAT,
+ .precious_reg = &tps6131x_regmap_precious,
+};
+
+struct tps6131x_timer_config {
+ u8 val;
+ u8 range;
+ u32 time_us;
+};
+
+static const struct tps6131x_timer_config tps6131x_timer_configs[] = {
+ { .val = 0, .range = 1, .time_us = 5300 },
+ { .val = 1, .range = 1, .time_us = 10700 },
+ { .val = 2, .range = 1, .time_us = 16000 },
+ { .val = 3, .range = 1, .time_us = 21300 },
+ { .val = 4, .range = 1, .time_us = 26600 },
+ { .val = 5, .range = 1, .time_us = 32000 },
+ { .val = 6, .range = 1, .time_us = 37300 },
+ { .val = 0, .range = 0, .time_us = 68200 },
+ { .val = 7, .range = 1, .time_us = 71500 },
+ { .val = 1, .range = 0, .time_us = 102200 },
+ { .val = 2, .range = 0, .time_us = 136300 },
+ { .val = 3, .range = 0, .time_us = 170400 },
+ { .val = 4, .range = 0, .time_us = 204500 },
+ { .val = 5, .range = 0, .time_us = 340800 },
+ { .val = 6, .range = 0, .time_us = 579300 },
+ { .val = 7, .range = 0, .time_us = 852000 },
+};
+
+static const struct tps6131x_timer_config *tps6131x_find_closest_timer_config(u32 timeout_us)
+{
+ const struct tps6131x_timer_config *timer_config = &tps6131x_timer_configs[0];
+ u32 diff, min_diff = U32_MAX;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tps6131x_timer_configs); i++) {
+ diff = abs(tps6131x_timer_configs[i].time_us - timeout_us);
+ if (diff < min_diff) {
+ timer_config = &tps6131x_timer_configs[i];
+ min_diff = diff;
+ if (!min_diff)
+ break;
+ }
+ }
+
+ return timer_config;
+}
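As a usage illustration grounded in the table above, a request that falls between two supported periods snaps to the nearest one:

/* Example: a 100 ms flash timeout request. */
const struct tps6131x_timer_config *tc = tps6131x_find_closest_timer_config(100000);
/* tc->time_us == 102200 (val 1, range 0); 68200 us and 71500 us are both farther away. */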
+
+static int tps6131x_reset_chip(struct tps6131x *tps6131x)
+{
+ int ret;
+
+ if (tps6131x->reset_gpio) {
+ gpiod_set_value_cansleep(tps6131x->reset_gpio, 1);
+ fsleep(10);
+ gpiod_set_value_cansleep(tps6131x->reset_gpio, 0);
+ fsleep(100);
+ } else {
+ ret = regmap_update_bits(tps6131x->regmap, TPS6131X_REG_0, TPS6131X_REG_0_RESET,
+ TPS6131X_REG_0_RESET);
+ if (ret)
+ return ret;
+
+ fsleep(100);
+
+ ret = regmap_update_bits(tps6131x->regmap, TPS6131X_REG_0, TPS6131X_REG_0_RESET, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tps6131x_init_chip(struct tps6131x *tps6131x)
+{
+ u32 val;
+ int ret;
+
+ val = tps6131x->valley_current_limit ? TPS6131X_REG_4_ILIM : 0;
+
+ ret = regmap_write(tps6131x->regmap, TPS6131X_REG_4, val);
+ if (ret)
+ return ret;
+
+ val = TPS6131X_REG_5_ENPSM | TPS6131X_REG_5_STSTRB1_DIR | TPS6131X_REG_5_GPIOTYPE;
+
+ if (tps6131x->chan1_en)
+ val |= TPS6131X_REG_5_ENLED1;
+
+ if (tps6131x->chan2_en)
+ val |= TPS6131X_REG_5_ENLED2;
+
+ if (tps6131x->chan3_en)
+ val |= TPS6131X_REG_5_ENLED3;
+
+ ret = regmap_write(tps6131x->regmap, TPS6131X_REG_5, val);
+ if (ret)
+ return ret;
+
+ val = TPS6131X_REG_6_ENTS;
+
+ ret = regmap_write(tps6131x->regmap, TPS6131X_REG_6, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int tps6131x_set_mode(struct tps6131x *tps6131x, enum tps6131x_mode mode, bool force)
+{
+ u8 val = mode << TPS6131X_REG_1_MODE_SHIFT;
+
+ return regmap_update_bits_base(tps6131x->regmap, TPS6131X_REG_1, TPS6131X_REG_1_MODE, val,
+ NULL, false, force);
+}
+
+static void tps6131x_torch_refresh_handler(struct work_struct *work)
+{
+ struct tps6131x *tps6131x = container_of(work, struct tps6131x, torch_refresh_work.work);
+ int ret;
+
+ guard(mutex)(&tps6131x->lock);
+
+ ret = tps6131x_set_mode(tps6131x, TPS6131X_MODE_TORCH, true);
+ if (ret < 0) {
+ dev_err(tps6131x->dev, "Failed to refresh torch watchdog timer\n");
+ return;
+ }
+
+ schedule_delayed_work(&tps6131x->torch_refresh_work,
+ TPS6131X_TORCH_REFRESH_INTERVAL_JIFFIES);
+}
+
+static int tps6131x_brightness_set(struct led_classdev *cdev, enum led_brightness brightness)
+{
+ struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(cdev);
+ struct tps6131x *tps6131x = fled_cdev_to_tps6131x(fled_cdev);
+ u32 num_chans, steps_chan13, steps_chan2, steps_remaining;
+ u8 reg0;
+ int ret;
+
+ cancel_delayed_work_sync(&tps6131x->torch_refresh_work);
+
+ /*
+ * The brightness parameter uses the number of current steps as the unit (not the current
+ * value itself). Since the reported step size can vary depending on the configuration,
+ * this value must be converted into actual register steps.
+ */
+ steps_remaining = (brightness * tps6131x->step_torch_current_ma) / TPS6131X_TORCH_STEP_I_MA;
+
+ num_chans = tps6131x->chan1_en + tps6131x->chan2_en + tps6131x->chan3_en;
+
+ /*
+ * The currents are distributed as evenly as possible across the activated channels.
+ * Since channels 1 and 3 share the same register setting, they always use the same current
+ * value. Channel 2 supports higher currents and thus takes over the remaining additional
+ * portion that cannot be covered by the other channels.
+ */
+ steps_chan13 = min_t(u32, steps_remaining / num_chans,
+ TPS6131X_TORCH_MAX_I_CHAN13_MA / TPS6131X_TORCH_STEP_I_MA);
+ if (tps6131x->chan1_en)
+ steps_remaining -= steps_chan13;
+ if (tps6131x->chan3_en)
+ steps_remaining -= steps_chan13;
+
+ steps_chan2 = min_t(u32, steps_remaining,
+ TPS6131X_TORCH_MAX_I_CHAN2_MA / TPS6131X_TORCH_STEP_I_MA);
+
+ guard(mutex)(&tps6131x->lock);
+
+ reg0 = (steps_chan13 << TPS6131X_REG_0_DCLC13_SHIFT) |
+ (steps_chan2 << TPS6131X_REG_0_DCLC2_SHIFT);
+ ret = regmap_update_bits(tps6131x->regmap, TPS6131X_REG_0,
+ TPS6131X_REG_0_DCLC13 | TPS6131X_REG_0_DCLC2, reg0);
+ if (ret < 0)
+ return ret;
+
+ ret = tps6131x_set_mode(tps6131x, brightness ? TPS6131X_MODE_TORCH : TPS6131X_MODE_SHUTDOWN,
+ true);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * In order to use both the flash and the video light functions purely via the I2C
+ * interface, STRB1 must be low. If STRB1 is low, then the video light watchdog timer
+ * is also active, which puts the device into the shutdown state after around 13 seconds.
+ * To prevent this, the mode must be refreshed within the watchdog timeout.
+ */
+ if (brightness)
+ schedule_delayed_work(&tps6131x->torch_refresh_work,
+ TPS6131X_TORCH_REFRESH_INTERVAL_JIFFIES);
+
+ return 0;
+}
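A worked instance of the distribution above, as a standalone sketch (assumes all three channels enabled and the default 25 mA step; the per-channel torch limit is 175 mA):

#include <stdio.h>

#define STEP_MA		25		/* TPS6131X_TORCH_STEP_I_MA */
#define MAX13_STEPS	(175 / STEP_MA)	/* LED1/LED3 per-channel limit: 7 steps */
#define MAX2_STEPS	(175 / STEP_MA)	/* LED2 per-channel limit: 7 steps */

static unsigned int umin(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int steps = 20;				/* 500 mA requested */
	unsigned int s13 = umin(steps / 3, MAX13_STEPS);	/* 6 -> 150 mA each */
	unsigned int s2 = umin(steps - 2 * s13, MAX2_STEPS);	/* 7 -> 175 mA */

	printf("LED1/LED3: %u mA each, LED2: %u mA, total %u mA\n",
	       s13 * STEP_MA, s2 * STEP_MA, (2 * s13 + s2) * STEP_MA);
	return 0;	/* 150 + 150 + 175 = 475 mA; the request rounds down */
}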
+
+static int tps6131x_strobe_set(struct led_classdev_flash *fled_cdev, bool state)
+{
+ struct tps6131x *tps6131x = fled_cdev_to_tps6131x(fled_cdev);
+ int ret;
+
+ guard(mutex)(&tps6131x->lock);
+
+ ret = tps6131x_set_mode(tps6131x, state ? TPS6131X_MODE_FLASH : TPS6131X_MODE_SHUTDOWN,
+ true);
+ if (ret < 0)
+ return ret;
+
+ if (state) {
+ ret = regmap_update_bits_base(tps6131x->regmap, TPS6131X_REG_3, TPS6131X_REG_3_SFT,
+ TPS6131X_REG_3_SFT, NULL, false, true);
+ if (ret)
+ return ret;
+ }
+
+ ret = regmap_update_bits_base(tps6131x->regmap, TPS6131X_REG_3, TPS6131X_REG_3_SFT, 0, NULL,
+ false, true);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int tps6131x_flash_brightness_set(struct led_classdev_flash *fled_cdev, u32 brightness)
+{
+ struct tps6131x *tps6131x = fled_cdev_to_tps6131x(fled_cdev);
+ u32 num_chans;
+ u32 steps_chan13, steps_chan2;
+ u32 steps_remaining;
+ int ret;
+
+ steps_remaining = brightness / TPS6131X_FLASH_STEP_I_MA;
+ num_chans = tps6131x->chan1_en + tps6131x->chan2_en + tps6131x->chan3_en;
+ steps_chan13 = min_t(u32, steps_remaining / num_chans,
+ TPS6131X_FLASH_MAX_I_CHAN13_MA / TPS6131X_FLASH_STEP_I_MA);
+ if (tps6131x->chan1_en)
+ steps_remaining -= steps_chan13;
+ if (tps6131x->chan3_en)
+ steps_remaining -= steps_chan13;
+ steps_chan2 = min_t(u32, steps_remaining,
+ TPS6131X_FLASH_MAX_I_CHAN2_MA / TPS6131X_FLASH_STEP_I_MA);
+
+ guard(mutex)(&tps6131x->lock);
+
+ ret = regmap_update_bits(tps6131x->regmap, TPS6131X_REG_2, TPS6131X_REG_2_FC13,
+ steps_chan13 << TPS6131X_REG_2_FC13_SHIFT);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(tps6131x->regmap, TPS6131X_REG_1, TPS6131X_REG_1_FC2,
+ steps_chan2 << TPS6131X_REG_1_FC2_SHIFT);
+ if (ret < 0)
+ return ret;
+
+ fled_cdev->brightness.val = brightness;
+
+ return 0;
+}
+
+static int tps6131x_flash_timeout_set(struct led_classdev_flash *fled_cdev, u32 timeout_us)
+{
+ const struct tps6131x_timer_config *timer_config;
+ struct tps6131x *tps6131x = fled_cdev_to_tps6131x(fled_cdev);
+ u8 reg3;
+ int ret;
+
+ guard(mutex)(&tps6131x->lock);
+
+ timer_config = tps6131x_find_closest_timer_config(timeout_us);
+
+ reg3 = timer_config->val << TPS6131X_REG_3_STIM_SHIFT;
+ if (timer_config->range)
+ reg3 |= TPS6131X_REG_3_SELSTIM_TO;
+
+ ret = regmap_update_bits(tps6131x->regmap, TPS6131X_REG_3,
+ TPS6131X_REG_3_STIM | TPS6131X_REG_3_SELSTIM_TO, reg3);
+ if (ret < 0)
+ return ret;
+
+ fled_cdev->timeout.val = timer_config->time_us;
+
+ return 0;
+}
+
+static int tps6131x_strobe_get(struct led_classdev_flash *fled_cdev, bool *state)
+{
+ struct tps6131x *tps6131x = fled_cdev_to_tps6131x(fled_cdev);
+ unsigned int reg3;
+ int ret;
+
+ ret = regmap_read_bypassed(tps6131x->regmap, TPS6131X_REG_3, &reg3);
+ if (ret)
+ return ret;
+
+ *state = !!(reg3 & TPS6131X_REG_3_SFT);
+
+ return 0;
+}
+
+static int tps6131x_flash_fault_get(struct led_classdev_flash *fled_cdev, u32 *fault)
+{
+ struct tps6131x *tps6131x = fled_cdev_to_tps6131x(fled_cdev);
+ unsigned int reg3, reg4, reg6;
+ int ret;
+
+ *fault = 0;
+
+ ret = regmap_read_bypassed(tps6131x->regmap, TPS6131X_REG_3, &reg3);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read_bypassed(tps6131x->regmap, TPS6131X_REG_4, &reg4);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read_bypassed(tps6131x->regmap, TPS6131X_REG_6, &reg6);
+ if (ret < 0)
+ return ret;
+
+ if (reg3 & TPS6131X_REG_3_HPFL)
+ *fault |= LED_FAULT_SHORT_CIRCUIT;
+
+ if (reg3 & TPS6131X_REG_3_SELSTIM_TO)
+ *fault |= LED_FAULT_TIMEOUT;
+
+ if (reg4 & TPS6131X_REG_4_HOTDIE_HI)
+ *fault |= LED_FAULT_OVER_TEMPERATURE;
+
+ if (reg6 & (TPS6131X_REG_6_LEDHOT | TPS6131X_REG_6_LEDWARN))
+ *fault |= LED_FAULT_LED_OVER_TEMPERATURE;
+
+ if (!(reg6 & TPS6131X_REG_6_LEDHDR))
+ *fault |= LED_FAULT_UNDER_VOLTAGE;
+
+ if (reg6 & TPS6131X_REG_6_LEDHOT) {
+ ret = regmap_update_bits_base(tps6131x->regmap, TPS6131X_REG_6,
+ TPS6131X_REG_6_LEDHOT, 0, NULL, false, true);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct led_flash_ops flash_ops = {
+ .flash_brightness_set = tps6131x_flash_brightness_set,
+ .strobe_set = tps6131x_strobe_set,
+ .strobe_get = tps6131x_strobe_get,
+ .timeout_set = tps6131x_flash_timeout_set,
+ .fault_get = tps6131x_flash_fault_get,
+};
+
+static int tps6131x_parse_node(struct tps6131x *tps6131x)
+{
+ const struct tps6131x_timer_config *timer_config;
+ struct device *dev = tps6131x->dev;
+ u32 channels[TPS6131X_MAX_CHANNELS];
+ u32 current_step_multiplier;
+ u32 current_ua;
+ u32 max_current_flash_ma, max_current_torch_ma;
+ u32 timeout_us;
+ int num_channels;
+ int i;
+ int ret;
+
+ tps6131x->valley_current_limit = device_property_read_bool(dev, "ti,valley-current-limit");
+
+ tps6131x->led_node = fwnode_get_next_available_child_node(dev->fwnode, NULL);
+ if (!tps6131x->led_node) {
+ dev_err(dev, "Missing LED node\n");
+ return -EINVAL;
+ }
+
+ num_channels = fwnode_property_count_u32(tps6131x->led_node, "led-sources");
+ if (num_channels <= 0) {
+ dev_err(dev, "Failed to read led-sources property\n");
+ return -EINVAL;
+ }
+
+ if (num_channels > TPS6131X_MAX_CHANNELS) {
+ dev_err(dev, "led-sources count %u exceeds maximum channel count %u\n",
+ num_channels, TPS6131X_MAX_CHANNELS);
+ return -EINVAL;
+ }
+
+ ret = fwnode_property_read_u32_array(tps6131x->led_node, "led-sources", channels,
+ num_channels);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read led-sources property\n");
+ return ret;
+ }
+
+ max_current_flash_ma = 0;
+ max_current_torch_ma = 0;
+ for (i = 0; i < num_channels; i++) {
+ switch (channels[i]) {
+ case 1:
+ tps6131x->chan1_en = true;
+ max_current_flash_ma += TPS6131X_FLASH_MAX_I_CHAN13_MA;
+ max_current_torch_ma += TPS6131X_TORCH_MAX_I_CHAN13_MA;
+ break;
+ case 2:
+ tps6131x->chan2_en = true;
+ max_current_flash_ma += TPS6131X_FLASH_MAX_I_CHAN2_MA;
+ max_current_torch_ma += TPS6131X_TORCH_MAX_I_CHAN2_MA;
+ break;
+ case 3:
+ tps6131x->chan3_en = true;
+ max_current_flash_ma += TPS6131X_FLASH_MAX_I_CHAN13_MA;
+ max_current_torch_ma += TPS6131X_TORCH_MAX_I_CHAN13_MA;
+ break;
+ default:
+ dev_err(dev, "led-source out of range [1-3]\n");
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * If only channels 1 and 3 are used, the step size is doubled because the two channels
+ * share the same current control register. For example, with led-sources = <1 3>, one
+ * register step adds 25 mA to both LED1 and LED3, so the effective step is 50 mA.
+ */
+ current_step_multiplier =
+ (tps6131x->chan1_en && tps6131x->chan3_en && !tps6131x->chan2_en) ? 2 : 1;
+ tps6131x->step_flash_current_ma = current_step_multiplier * TPS6131X_FLASH_STEP_I_MA;
+ tps6131x->step_torch_current_ma = current_step_multiplier * TPS6131X_TORCH_STEP_I_MA;
+
+ ret = fwnode_property_read_u32(tps6131x->led_node, "led-max-microamp", &current_ua);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read led-max-microamp property\n");
+ return ret;
+ }
+
+ tps6131x->max_torch_current_ma = UA_TO_MA(current_ua);
+
+ if (!tps6131x->max_torch_current_ma ||
+ tps6131x->max_torch_current_ma > max_current_torch_ma ||
+ (tps6131x->max_torch_current_ma % tps6131x->step_torch_current_ma)) {
+ dev_err(dev, "led-max-microamp out of range or not a multiple of %u\n",
+ tps6131x->step_torch_current_ma);
+ return -EINVAL;
+ }
+
+ ret = fwnode_property_read_u32(tps6131x->led_node, "flash-max-microamp", &current_ua);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read flash-max-microamp property\n");
+ return ret;
+ }
+
+ tps6131x->max_flash_current_ma = UA_TO_MA(current_ua);
+
+ if (!tps6131x->max_flash_current_ma ||
+ tps6131x->max_flash_current_ma > max_current_flash_ma ||
+ (tps6131x->max_flash_current_ma % tps6131x->step_flash_current_ma)) {
+ dev_err(dev, "flash-max-microamp out of range or not a multiple of %u\n",
+ tps6131x->step_flash_current_ma);
+ return -EINVAL;
+ }
+
+ ret = fwnode_property_read_u32(tps6131x->led_node, "flash-max-timeout-us", &timeout_us);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read flash-max-timeout-us property\n");
+ return ret;
+ }
+
+ timer_config = tps6131x_find_closest_timer_config(timeout_us);
+ tps6131x->max_timeout_us = timer_config->time_us;
+
+ if (tps6131x->max_timeout_us != timeout_us)
+ dev_warn(dev, "flash-max-timeout-us %u not supported (using %u)\n", timeout_us,
+ tps6131x->max_timeout_us);
+
+ return 0;
+}
+
+static int tps6131x_led_class_setup(struct tps6131x *tps6131x)
+{
+ const struct tps6131x_timer_config *timer_config;
+ struct led_classdev *led_cdev;
+ struct led_flash_setting *setting;
+ struct led_init_data init_data = {};
+ int ret;
+
+ tps6131x->fled_cdev.ops = &flash_ops;
+
+ setting = &tps6131x->fled_cdev.timeout;
+ timer_config = tps6131x_find_closest_timer_config(0);
+ setting->min = timer_config->time_us;
+ setting->max = tps6131x->max_timeout_us;
+ setting->step = 1; /* Only some specific time periods are supported. No fixed step size. */
+ setting->val = setting->min;
+
+ setting = &tps6131x->fled_cdev.brightness;
+ setting->min = tps6131x->step_flash_current_ma;
+ setting->max = tps6131x->max_flash_current_ma;
+ setting->step = tps6131x->step_flash_current_ma;
+ setting->val = setting->min;
+
+ led_cdev = &tps6131x->fled_cdev.led_cdev;
+ led_cdev->brightness_set_blocking = tps6131x_brightness_set;
+ led_cdev->max_brightness = tps6131x->max_torch_current_ma / tps6131x->step_torch_current_ma;
+ led_cdev->flags |= LED_DEV_CAP_FLASH;
+
+ init_data.fwnode = tps6131x->led_node;
+ init_data.devicename = NULL;
+ init_data.default_label = NULL;
+ init_data.devname_mandatory = false;
+
+ ret = devm_led_classdev_flash_register_ext(tps6131x->dev, &tps6131x->fled_cdev,
+ &init_data);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int tps6131x_flash_external_strobe_set(struct v4l2_flash *v4l2_flash, bool enable)
+{
+ struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev;
+ struct tps6131x *tps6131x = fled_cdev_to_tps6131x(fled_cdev);
+
+ guard(mutex)(&tps6131x->lock);
+
+ return tps6131x_set_mode(tps6131x, enable ? TPS6131X_MODE_FLASH : TPS6131X_MODE_SHUTDOWN,
+ false);
+}
+
+static const struct v4l2_flash_ops tps6131x_v4l2_flash_ops = {
+ .external_strobe_set = tps6131x_flash_external_strobe_set,
+};
+
+static int tps6131x_v4l2_setup(struct tps6131x *tps6131x)
+{
+ struct v4l2_flash_config v4l2_cfg = { 0 };
+ struct led_flash_setting *intensity = &v4l2_cfg.intensity;
+
+ intensity->min = tps6131x->step_torch_current_ma;
+ intensity->max = tps6131x->max_torch_current_ma;
+ intensity->step = tps6131x->step_torch_current_ma;
+ intensity->val = intensity->min;
+
+ strscpy(v4l2_cfg.dev_name, tps6131x->fled_cdev.led_cdev.dev->kobj.name,
+ sizeof(v4l2_cfg.dev_name));
+
+ v4l2_cfg.has_external_strobe = true;
+ v4l2_cfg.flash_faults = LED_FAULT_TIMEOUT | LED_FAULT_OVER_TEMPERATURE |
+ LED_FAULT_SHORT_CIRCUIT | LED_FAULT_UNDER_VOLTAGE |
+ LED_FAULT_LED_OVER_TEMPERATURE;
+
+ tps6131x->v4l2_flash = v4l2_flash_init(tps6131x->dev, tps6131x->led_node,
+ &tps6131x->fled_cdev, &tps6131x_v4l2_flash_ops,
+ &v4l2_cfg);
+ if (IS_ERR(tps6131x->v4l2_flash)) {
+ dev_err(tps6131x->dev, "Failed to initialize v4l2 flash LED\n");
+ return PTR_ERR(tps6131x->v4l2_flash);
+ }
+
+ return 0;
+}
+
+static int tps6131x_probe(struct i2c_client *client)
+{
+ struct tps6131x *tps6131x;
+ int ret;
+
+ tps6131x = devm_kzalloc(&client->dev, sizeof(*tps6131x), GFP_KERNEL);
+ if (!tps6131x)
+ return -ENOMEM;
+
+ tps6131x->dev = &client->dev;
+ i2c_set_clientdata(client, tps6131x);
+ mutex_init(&tps6131x->lock);
+ INIT_DELAYED_WORK(&tps6131x->torch_refresh_work, tps6131x_torch_refresh_handler);
+
+ ret = tps6131x_parse_node(tps6131x);
+ if (ret)
+ return ret;
+
+ tps6131x->regmap = devm_regmap_init_i2c(client, &tps6131x_regmap);
+ if (IS_ERR(tps6131x->regmap)) {
+ ret = PTR_ERR(tps6131x->regmap);
+ return dev_err_probe(&client->dev, ret, "Failed to allocate register map\n");
+ }
+
+ tps6131x->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(tps6131x->reset_gpio)) {
+ ret = PTR_ERR(tps6131x->reset_gpio);
+ return dev_err_probe(&client->dev, ret, "Failed to get reset GPIO\n");
+ }
+
+ ret = tps6131x_reset_chip(tps6131x);
+ if (ret)
+ return dev_err_probe(&client->dev, ret, "Failed to reset LED controller\n");
+
+ ret = tps6131x_init_chip(tps6131x);
+ if (ret)
+ return dev_err_probe(&client->dev, ret, "Failed to initialize LED controller\n");
+
+ ret = tps6131x_led_class_setup(tps6131x);
+ if (ret)
+ return dev_err_probe(&client->dev, ret, "Failed to setup LED class\n");
+
+ ret = tps6131x_v4l2_setup(tps6131x);
+ if (ret)
+ return dev_err_probe(&client->dev, ret, "Failed to setup v4l2 flash\n");
+
+ return 0;
+}
+
+static void tps6131x_remove(struct i2c_client *client)
+{
+ struct tps6131x *tps6131x = i2c_get_clientdata(client);
+
+ v4l2_flash_release(tps6131x->v4l2_flash);
+
+ cancel_delayed_work_sync(&tps6131x->torch_refresh_work);
+}
+
+static const struct of_device_id of_tps6131x_leds_match[] = {
+ { .compatible = "ti,tps61310" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_tps6131x_leds_match);
+
+static struct i2c_driver tps6131x_i2c_driver = {
+ .driver = {
+ .name = "tps6131x",
+ .of_match_table = of_tps6131x_leds_match,
+ },
+ .probe = tps6131x_probe,
+ .remove = tps6131x_remove,
+};
+module_i2c_driver(tps6131x_i2c_driver);
+
+MODULE_DESCRIPTION("Texas Instruments TPS6131X flash LED driver");
+MODULE_AUTHOR("Matthias Fend <matthias.fend@emfend.at>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/led-class-flash.c b/drivers/leds/led-class-flash.c
index f4e26ce84862..165035a8826c 100644
--- a/drivers/leds/led-class-flash.c
+++ b/drivers/leds/led-class-flash.c
@@ -440,6 +440,21 @@ int led_update_flash_brightness(struct led_classdev_flash *fled_cdev)
}
EXPORT_SYMBOL_GPL(led_update_flash_brightness);
+int led_set_flash_duration(struct led_classdev_flash *fled_cdev, u32 duration)
+{
+ struct led_classdev *led_cdev = &fled_cdev->led_cdev;
+ struct led_flash_setting *s = &fled_cdev->duration;
+
+ s->val = duration;
+ led_clamp_align(s);
+
+ if (!(led_cdev->flags & LED_SUSPENDED))
+ return call_flash_op(fled_cdev, duration_set, s->val);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(led_set_flash_duration);
+
MODULE_AUTHOR("Jacek Anaszewski <j.anaszewski@samsung.com>");
MODULE_DESCRIPTION("LED Flash class interface");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/led-class-multicolor.c b/drivers/leds/led-class-multicolor.c
index b2a87c994816..fd66d2bdeace 100644
--- a/drivers/leds/led-class-multicolor.c
+++ b/drivers/leds/led-class-multicolor.c
@@ -59,7 +59,8 @@ static ssize_t multi_intensity_store(struct device *dev,
for (i = 0; i < mcled_cdev->num_colors; i++)
mcled_cdev->subled_info[i].intensity = intensity_value[i];
- led_set_brightness(led_cdev, led_cdev->brightness);
+ if (!test_bit(LED_BLINK_SW, &led_cdev->work_flags))
+ led_set_brightness(led_cdev, led_cdev->brightness);
ret = size;
err_out:
mutex_unlock(&led_cdev->led_access);
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index 907fc703e0c5..1a59a4f38479 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -529,6 +529,7 @@ int led_compose_name(struct device *dev, struct led_init_data *init_data,
struct led_properties props = {};
struct fwnode_handle *fwnode = init_data->fwnode;
const char *devicename = init_data->devicename;
+ int n;
if (!led_classdev_name)
return -EINVAL;
@@ -542,45 +543,49 @@ int led_compose_name(struct device *dev, struct led_init_data *init_data,
* Otherwise the label is prepended with devicename to compose
* the final LED class device name.
*/
- if (!devicename) {
- strscpy(led_classdev_name, props.label,
- LED_MAX_NAME_SIZE);
+ if (devicename) {
+ n = snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s:%s",
+ devicename, props.label);
} else {
- snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s:%s",
- devicename, props.label);
+ n = snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s", props.label);
}
} else if (props.function || props.color_present) {
char tmp_buf[LED_MAX_NAME_SIZE];
if (props.func_enum_present) {
- snprintf(tmp_buf, LED_MAX_NAME_SIZE, "%s:%s-%d",
- props.color_present ? led_colors[props.color] : "",
- props.function ?: "", props.func_enum);
+ n = snprintf(tmp_buf, LED_MAX_NAME_SIZE, "%s:%s-%d",
+ props.color_present ? led_colors[props.color] : "",
+ props.function ?: "", props.func_enum);
} else {
- snprintf(tmp_buf, LED_MAX_NAME_SIZE, "%s:%s",
- props.color_present ? led_colors[props.color] : "",
- props.function ?: "");
+ n = snprintf(tmp_buf, LED_MAX_NAME_SIZE, "%s:%s",
+ props.color_present ? led_colors[props.color] : "",
+ props.function ?: "");
}
+ if (n >= LED_MAX_NAME_SIZE)
+ return -E2BIG;
+
if (init_data->devname_mandatory) {
- snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s:%s",
- devicename, tmp_buf);
+ n = snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s:%s",
+ devicename, tmp_buf);
} else {
- strscpy(led_classdev_name, tmp_buf, LED_MAX_NAME_SIZE);
-
+ n = snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s", tmp_buf);
}
} else if (init_data->default_label) {
if (!devicename) {
dev_err(dev, "Legacy LED naming requires devicename segment");
return -EINVAL;
}
- snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s:%s",
- devicename, init_data->default_label);
+ n = snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s:%s",
+ devicename, init_data->default_label);
} else if (is_of_node(fwnode)) {
- strscpy(led_classdev_name, to_of_node(fwnode)->name,
- LED_MAX_NAME_SIZE);
+ n = snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s",
+ to_of_node(fwnode)->name);
} else
return -EINVAL;
+ if (n >= LED_MAX_NAME_SIZE)
+ return -E2BIG;
+
return 0;
}
EXPORT_SYMBOL_GPL(led_compose_name);
diff --git a/drivers/leds/led-test.c b/drivers/leds/led-test.c
new file mode 100644
index 000000000000..ddf9aa967a6a
--- /dev/null
+++ b/drivers/leds/led-test.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2025 Google LLC
+ *
+ * Author: Lee Jones <lee@kernel.org>
+ */
+
+#include <kunit/device.h>
+#include <kunit/test.h>
+#include <linux/device.h>
+#include <linux/leds.h>
+
+#define LED_TEST_POST_REG_BRIGHTNESS 10
+
+struct led_test_ddata {
+ struct led_classdev cdev;
+ struct device *dev;
+};
+
+static enum led_brightness led_test_brightness_get(struct led_classdev *cdev)
+{
+ return LED_TEST_POST_REG_BRIGHTNESS;
+}
+
+static void led_test_class_register(struct kunit *test)
+{
+ struct led_test_ddata *ddata = test->priv;
+ struct led_classdev *cdev_clash, *cdev = &ddata->cdev;
+ struct device *dev = ddata->dev;
+ int ret;
+
+ /* Register a LED class device */
+ cdev->name = "led-test";
+ cdev->brightness_get = led_test_brightness_get;
+ cdev->brightness = 0;
+
+ ret = devm_led_classdev_register(dev, cdev);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_EQ(test, cdev->max_brightness, LED_FULL);
+ KUNIT_EXPECT_EQ(test, cdev->brightness, LED_TEST_POST_REG_BRIGHTNESS);
+ KUNIT_EXPECT_STREQ(test, cdev->dev->kobj.name, "led-test");
+
+ /* Register again with the same name - expect it to pass with the LED renamed */
+ cdev_clash = devm_kmemdup(dev, cdev, sizeof(*cdev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cdev_clash);
+
+ ret = devm_led_classdev_register(dev, cdev_clash);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_STREQ(test, cdev_clash->dev->kobj.name, "led-test_1");
+ KUNIT_EXPECT_STREQ(test, cdev_clash->name, "led-test");
+
+ /* Enable name conflict rejection and register with the same name again - expect failure */
+ cdev_clash->flags |= LED_REJECT_NAME_CONFLICT;
+ ret = devm_led_classdev_register(dev, cdev_clash);
+ KUNIT_EXPECT_EQ(test, ret, -EEXIST);
+}
+
+static void led_test_class_add_lookup_and_get(struct kunit *test)
+{
+ struct led_test_ddata *ddata = test->priv;
+ struct led_classdev *cdev = &ddata->cdev, *cdev_get;
+ struct device *dev = ddata->dev;
+ struct led_lookup_data lookup;
+ int ret;
+
+ /* First, register a LED class device */
+ cdev->name = "led-test";
+ ret = devm_led_classdev_register(dev, cdev);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ /* Then make the LED available for lookup */
+ lookup.provider = cdev->name;
+ lookup.dev_id = dev_name(dev);
+ lookup.con_id = "led-test-1";
+ led_add_lookup(&lookup);
+
+ /* Finally, attempt to look it up via the API - imagine this was an orthogonal driver */
+ cdev_get = devm_led_get(dev, "led-test-1");
+ KUNIT_ASSERT_FALSE(test, IS_ERR(cdev_get));
+
+ KUNIT_EXPECT_STREQ(test, cdev_get->name, cdev->name);
+
+ led_remove_lookup(&lookup);
+}
+
+static struct kunit_case led_test_cases[] = {
+ KUNIT_CASE(led_test_class_register),
+ KUNIT_CASE(led_test_class_add_lookup_and_get),
+ { }
+};
+
+static int led_test_init(struct kunit *test)
+{
+ struct led_test_ddata *ddata;
+ struct device *dev;
+
+ ddata = kunit_kzalloc(test, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ test->priv = ddata;
+
+ dev = kunit_device_register(test, "led_test");
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+
+ ddata->dev = get_device(dev);
+
+ return 0;
+}
+
+static void led_test_exit(struct kunit *test)
+{
+ struct led_test_ddata *ddata = test->priv;
+
+ if (ddata && ddata->dev)
+ put_device(ddata->dev);
+}
+
+static struct kunit_suite led_test_suite = {
+ .name = "led",
+ .init = led_test_init,
+ .exit = led_test_exit,
+ .test_cases = led_test_cases,
+};
+kunit_test_suite(led_test_suite);
+
+MODULE_AUTHOR("Lee Jones <lee@kernel.org>");
+MODULE_DESCRIPTION("KUnit tests for the LED framework");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index b2d40f87a5ff..3799dcc1cf07 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -54,6 +54,11 @@ ssize_t led_trigger_write(struct file *filp, struct kobject *kobj,
goto unlock;
}
+ if (sysfs_streq(buf, "default")) {
+ led_trigger_set_default(led_cdev);
+ goto unlock;
+ }
+
down_read(&triggers_list_lock);
list_for_each_entry(trig, &trigger_list, next_trig) {
if (sysfs_streq(buf, trig->name) && trigger_relevant(led_cdev, trig)) {
@@ -98,6 +103,9 @@ static int led_trigger_format(char *buf, size_t size,
int len = led_trigger_snprintf(buf, size, "%s",
led_cdev->trigger ? "none" : "[none]");
+ if (led_cdev->default_trigger)
+ len += led_trigger_snprintf(buf + len, size - len, " default");
+
list_for_each_entry(trig, &trigger_list, next_trig) {
bool hit;
@@ -281,6 +289,11 @@ void led_trigger_set_default(struct led_classdev *led_cdev)
if (!led_cdev->default_trigger)
return;
+ if (!strcmp(led_cdev->default_trigger, "none")) {
+ led_trigger_remove(led_cdev);
+ return;
+ }
+
down_read(&triggers_list_lock);
down_write(&led_cdev->trigger_lock);
list_for_each_entry(trig, &trigger_list, next_trig) {
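With the hunks above applied, user space gains a stable way to restore a LED's default trigger without knowing the trigger's name. A hedged user-space sketch (the LED path is an example, not a fixed name):

#include <stdio.h>

int main(void)
{
	/* Path is illustrative; substitute the LED of interest. */
	FILE *f = fopen("/sys/class/leds/led0/trigger", "w");

	if (!f)
		return 1;
	fputs("default", f);	/* reverts to default_trigger, or to "none" */
	return fclose(f) ? 1 : 0;
}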
diff --git a/drivers/leds/leds-cros_ec.c b/drivers/leds/leds-cros_ec.c
index 275522b81ea5..377cf04e202a 100644
--- a/drivers/leds/leds-cros_ec.c
+++ b/drivers/leds/leds-cros_ec.c
@@ -60,31 +60,18 @@ static inline struct cros_ec_led_priv *cros_ec_led_cdev_to_priv(struct led_class
union cros_ec_led_cmd_data {
struct ec_params_led_control req;
struct ec_response_led_control resp;
-} __packed;
+};
static int cros_ec_led_send_cmd(struct cros_ec_device *cros_ec,
union cros_ec_led_cmd_data *arg)
{
int ret;
- struct {
- struct cros_ec_command msg;
- union cros_ec_led_cmd_data data;
- } __packed buf = {
- .msg = {
- .version = 1,
- .command = EC_CMD_LED_CONTROL,
- .insize = sizeof(arg->resp),
- .outsize = sizeof(arg->req),
- },
- .data.req = arg->req
- };
-
- ret = cros_ec_cmd_xfer_status(cros_ec, &buf.msg);
+
+ ret = cros_ec_cmd(cros_ec, 1, EC_CMD_LED_CONTROL, &arg->req,
+ sizeof(arg->req), &arg->resp, sizeof(arg->resp));
if (ret < 0)
return ret;
- arg->resp = buf.data.resp;
-
return 0;
}
diff --git a/drivers/leds/leds-lp8860.c b/drivers/leds/leds-lp8860.c
index 995f2adf8569..52b97c9f2a03 100644
--- a/drivers/leds/leds-lp8860.c
+++ b/drivers/leds/leds-lp8860.c
@@ -90,8 +90,6 @@
* @led_dev: led class device pointer
* @regmap: Devices register map
* @eeprom_regmap: EEPROM register map
- * @enable_gpio: VDDIO/EN gpio to enable communication interface
- * @regulator: LED supply regulator pointer
*/
struct lp8860_led {
struct mutex lock;
@@ -99,16 +97,9 @@ struct lp8860_led {
struct led_classdev led_dev;
struct regmap *regmap;
struct regmap *eeprom_regmap;
- struct gpio_desc *enable_gpio;
- struct regulator *regulator;
-};
-
-struct lp8860_eeprom_reg {
- uint8_t reg;
- uint8_t value;
};
-static struct lp8860_eeprom_reg lp8860_eeprom_disp_regs[] = {
+static const struct reg_sequence lp8860_eeprom_disp_regs[] = {
{ LP8860_EEPROM_REG_0, 0xed },
{ LP8860_EEPROM_REG_1, 0xdf },
{ LP8860_EEPROM_REG_2, 0xdc },
@@ -136,43 +127,29 @@ static struct lp8860_eeprom_reg lp8860_eeprom_disp_regs[] = {
{ LP8860_EEPROM_REG_24, 0x3E },
};
-static int lp8860_unlock_eeprom(struct lp8860_led *led, int lock)
+static int lp8860_unlock_eeprom(struct lp8860_led *led)
{
int ret;
- mutex_lock(&led->lock);
-
- if (lock == LP8860_UNLOCK_EEPROM) {
- ret = regmap_write(led->regmap,
- LP8860_EEPROM_UNLOCK,
- LP8860_EEPROM_CODE_1);
- if (ret) {
- dev_err(&led->client->dev, "EEPROM Unlock failed\n");
- goto out;
- }
-
- ret = regmap_write(led->regmap,
- LP8860_EEPROM_UNLOCK,
- LP8860_EEPROM_CODE_2);
- if (ret) {
- dev_err(&led->client->dev, "EEPROM Unlock failed\n");
- goto out;
- }
- ret = regmap_write(led->regmap,
- LP8860_EEPROM_UNLOCK,
- LP8860_EEPROM_CODE_3);
- if (ret) {
- dev_err(&led->client->dev, "EEPROM Unlock failed\n");
- goto out;
- }
- } else {
- ret = regmap_write(led->regmap,
- LP8860_EEPROM_UNLOCK,
- LP8860_LOCK_EEPROM);
+ guard(mutex)(&led->lock);
+
+ ret = regmap_write(led->regmap, LP8860_EEPROM_UNLOCK, LP8860_EEPROM_CODE_1);
+ if (ret) {
+ dev_err(&led->client->dev, "EEPROM Unlock failed\n");
+ return ret;
+ }
+
+ ret = regmap_write(led->regmap, LP8860_EEPROM_UNLOCK, LP8860_EEPROM_CODE_2);
+ if (ret) {
+ dev_err(&led->client->dev, "EEPROM Unlock failed\n");
+ return ret;
+ }
+ ret = regmap_write(led->regmap, LP8860_EEPROM_UNLOCK, LP8860_EEPROM_CODE_3);
+ if (ret) {
+ dev_err(&led->client->dev, "EEPROM Unlock failed\n");
+ return ret;
}
-out:
- mutex_unlock(&led->lock);
return ret;
}
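
The rewrite above drops the lock/unlock selector argument and converts the manual mutex_lock()/mutex_unlock() pairs to the scope-based guard() helper from <linux/cleanup.h>, which releases the lock on every return path. A minimal sketch of the pattern, assuming a hypothetical shared counter:

	#include <linux/cleanup.h>
	#include <linux/errno.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(example_lock);
	static int shared_count;

	static int example_inc(int max)
	{
		guard(mutex)(&example_lock);	/* unlocked when the function returns */

		if (shared_count >= max)
			return -ENOSPC;		/* early return still drops the lock */

		shared_count++;
		return 0;
	}
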
@@ -209,47 +186,35 @@ static int lp8860_brightness_set(struct led_classdev *led_cdev,
int disp_brightness = brt_val * 255;
int ret;
- mutex_lock(&led->lock);
+ guard(mutex)(&led->lock);
ret = lp8860_fault_check(led);
if (ret) {
dev_err(&led->client->dev, "Cannot read/clear faults\n");
- goto out;
+ return ret;
}
ret = regmap_write(led->regmap, LP8860_DISP_CL1_BRT_MSB,
(disp_brightness & 0xff00) >> 8);
if (ret) {
dev_err(&led->client->dev, "Cannot write CL1 MSB\n");
- goto out;
+ return ret;
}
ret = regmap_write(led->regmap, LP8860_DISP_CL1_BRT_LSB,
disp_brightness & 0xff);
if (ret) {
dev_err(&led->client->dev, "Cannot write CL1 LSB\n");
- goto out;
+ return ret;
}
-out:
- mutex_unlock(&led->lock);
- return ret;
+
+ return 0;
}
static int lp8860_init(struct lp8860_led *led)
{
unsigned int read_buf;
- int ret, i, reg_count;
-
- if (led->regulator) {
- ret = regulator_enable(led->regulator);
- if (ret) {
- dev_err(&led->client->dev,
- "Failed to enable regulator\n");
- return ret;
- }
- }
-
- gpiod_direction_output(led->enable_gpio, 1);
+ int ret, reg_count;
ret = lp8860_fault_check(led);
if (ret)
@@ -259,24 +224,20 @@ static int lp8860_init(struct lp8860_led *led)
if (ret)
goto out;
- ret = lp8860_unlock_eeprom(led, LP8860_UNLOCK_EEPROM);
+ ret = lp8860_unlock_eeprom(led);
if (ret) {
dev_err(&led->client->dev, "Failed unlocking EEPROM\n");
goto out;
}
reg_count = ARRAY_SIZE(lp8860_eeprom_disp_regs);
- for (i = 0; i < reg_count; i++) {
- ret = regmap_write(led->eeprom_regmap,
- lp8860_eeprom_disp_regs[i].reg,
- lp8860_eeprom_disp_regs[i].value);
- if (ret) {
- dev_err(&led->client->dev, "Failed writing EEPROM\n");
- goto out;
- }
+ ret = regmap_multi_reg_write(led->eeprom_regmap, lp8860_eeprom_disp_regs, reg_count);
+ if (ret) {
+ dev_err(&led->client->dev, "Failed writing EEPROM\n");
+ goto out;
}
- ret = lp8860_unlock_eeprom(led, LP8860_LOCK_EEPROM);
+ ret = regmap_write(led->regmap, LP8860_EEPROM_UNLOCK, LP8860_LOCK_EEPROM);
if (ret)
goto out;
@@ -291,74 +252,14 @@ static int lp8860_init(struct lp8860_led *led)
return ret;
out:
- if (ret)
- gpiod_direction_output(led->enable_gpio, 0);
-
- if (led->regulator) {
- ret = regulator_disable(led->regulator);
- if (ret)
- dev_err(&led->client->dev,
- "Failed to disable regulator\n");
- }
-
return ret;
}
-static const struct reg_default lp8860_reg_defs[] = {
- { LP8860_DISP_CL1_BRT_MSB, 0x00},
- { LP8860_DISP_CL1_BRT_LSB, 0x00},
- { LP8860_DISP_CL1_CURR_MSB, 0x00},
- { LP8860_DISP_CL1_CURR_LSB, 0x00},
- { LP8860_CL2_BRT_MSB, 0x00},
- { LP8860_CL2_BRT_LSB, 0x00},
- { LP8860_CL2_CURRENT, 0x00},
- { LP8860_CL3_BRT_MSB, 0x00},
- { LP8860_CL3_BRT_LSB, 0x00},
- { LP8860_CL3_CURRENT, 0x00},
- { LP8860_CL4_BRT_MSB, 0x00},
- { LP8860_CL4_BRT_LSB, 0x00},
- { LP8860_CL4_CURRENT, 0x00},
- { LP8860_CONFIG, 0x00},
- { LP8860_FAULT_CLEAR, 0x00},
- { LP8860_EEPROM_CNTRL, 0x80},
- { LP8860_EEPROM_UNLOCK, 0x00},
-};
-
static const struct regmap_config lp8860_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = LP8860_EEPROM_UNLOCK,
- .reg_defaults = lp8860_reg_defs,
- .num_reg_defaults = ARRAY_SIZE(lp8860_reg_defs),
-};
-
-static const struct reg_default lp8860_eeprom_defs[] = {
- { LP8860_EEPROM_REG_0, 0x00 },
- { LP8860_EEPROM_REG_1, 0x00 },
- { LP8860_EEPROM_REG_2, 0x00 },
- { LP8860_EEPROM_REG_3, 0x00 },
- { LP8860_EEPROM_REG_4, 0x00 },
- { LP8860_EEPROM_REG_5, 0x00 },
- { LP8860_EEPROM_REG_6, 0x00 },
- { LP8860_EEPROM_REG_7, 0x00 },
- { LP8860_EEPROM_REG_8, 0x00 },
- { LP8860_EEPROM_REG_9, 0x00 },
- { LP8860_EEPROM_REG_10, 0x00 },
- { LP8860_EEPROM_REG_11, 0x00 },
- { LP8860_EEPROM_REG_12, 0x00 },
- { LP8860_EEPROM_REG_13, 0x00 },
- { LP8860_EEPROM_REG_14, 0x00 },
- { LP8860_EEPROM_REG_15, 0x00 },
- { LP8860_EEPROM_REG_16, 0x00 },
- { LP8860_EEPROM_REG_17, 0x00 },
- { LP8860_EEPROM_REG_18, 0x00 },
- { LP8860_EEPROM_REG_19, 0x00 },
- { LP8860_EEPROM_REG_20, 0x00 },
- { LP8860_EEPROM_REG_21, 0x00 },
- { LP8860_EEPROM_REG_22, 0x00 },
- { LP8860_EEPROM_REG_23, 0x00 },
- { LP8860_EEPROM_REG_24, 0x00 },
};
static const struct regmap_config lp8860_eeprom_regmap_config = {
@@ -366,10 +267,15 @@ static const struct regmap_config lp8860_eeprom_regmap_config = {
.val_bits = 8,
.max_register = LP8860_EEPROM_REG_24,
- .reg_defaults = lp8860_eeprom_defs,
- .num_reg_defaults = ARRAY_SIZE(lp8860_eeprom_defs),
};
+static void lp8860_disable_gpio(void *data)
+{
+ struct gpio_desc *gpio = data;
+
+ gpiod_set_value(gpio, 0);
+}
+
static int lp8860_probe(struct i2c_client *client)
{
int ret;
@@ -377,6 +283,7 @@ static int lp8860_probe(struct i2c_client *client)
struct device_node *np = dev_of_node(&client->dev);
struct device_node *child_node;
struct led_init_data init_data = {};
+ struct gpio_desc *enable_gpio;
led = devm_kzalloc(&client->dev, sizeof(*led), GFP_KERNEL);
if (!led)
@@ -386,24 +293,21 @@ static int lp8860_probe(struct i2c_client *client)
if (!child_node)
return -EINVAL;
- led->enable_gpio = devm_gpiod_get_optional(&client->dev,
- "enable", GPIOD_OUT_LOW);
- if (IS_ERR(led->enable_gpio)) {
- ret = PTR_ERR(led->enable_gpio);
- dev_err(&client->dev, "Failed to get enable gpio: %d\n", ret);
- return ret;
- }
+ enable_gpio = devm_gpiod_get_optional(&client->dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(enable_gpio))
+ return dev_err_probe(&client->dev, PTR_ERR(enable_gpio),
+ "Failed to get enable GPIO\n");
+ devm_add_action_or_reset(&client->dev, lp8860_disable_gpio, enable_gpio);
- led->regulator = devm_regulator_get(&client->dev, "vled");
- if (IS_ERR(led->regulator))
- led->regulator = NULL;
+ ret = devm_regulator_get_enable_optional(&client->dev, "vled");
+ if (ret && ret != -ENODEV)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to enable vled regulator\n");
led->client = client;
led->led_dev.brightness_set_blocking = lp8860_brightness_set;
- mutex_init(&led->lock);
-
- i2c_set_clientdata(client, led);
+ devm_mutex_init(&client->dev, &led->lock);
led->regmap = devm_regmap_init_i2c(client, &lp8860_regmap_config);
if (IS_ERR(led->regmap)) {
@@ -439,23 +343,6 @@ static int lp8860_probe(struct i2c_client *client)
return 0;
}
-static void lp8860_remove(struct i2c_client *client)
-{
- struct lp8860_led *led = i2c_get_clientdata(client);
- int ret;
-
- gpiod_direction_output(led->enable_gpio, 0);
-
- if (led->regulator) {
- ret = regulator_disable(led->regulator);
- if (ret)
- dev_err(&led->client->dev,
- "Failed to disable regulator\n");
- }
-
- mutex_destroy(&led->lock);
-}
-
static const struct i2c_device_id lp8860_id[] = {
{ "lp8860" },
{ }
@@ -474,7 +361,6 @@ static struct i2c_driver lp8860_driver = {
.of_match_table = of_lp8860_leds_match,
},
.probe = lp8860_probe,
- .remove = lp8860_remove,
.id_table = lp8860_id,
};
module_i2c_driver(lp8860_driver);
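
Deleting lp8860_remove() works because every teardown step is now device-managed: devm_regulator_get_enable_optional() disables the supply on unbind, devm_mutex_init() handles the lock, and the devm action registered in probe lowers the enable GPIO. A minimal sketch of the action pattern with hypothetical names; note that devm_add_action_or_reset() returns an error which callers normally check (the probe hunk above ignores it):

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/gpio/consumer.h>

	static void example_disable_gpio(void *data)
	{
		gpiod_set_value(data, 0);	/* runs automatically on unbind */
	}

	static int example_probe_gpio(struct device *dev)
	{
		struct gpio_desc *gpio;

		gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_HIGH);
		if (IS_ERR(gpio))
			return PTR_ERR(gpio);

		/* unlike the hunk above, propagate any registration failure */
		return devm_add_action_or_reset(dev, example_disable_gpio, gpio);
	}
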
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 1b47acf54720..7d4c071a6cd0 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -318,7 +318,8 @@ static int pca9532_gpio_request_pin(struct gpio_chip *gc, unsigned offset)
return -EBUSY;
}
-static void pca9532_gpio_set_value(struct gpio_chip *gc, unsigned offset, int val)
+static int pca9532_gpio_set_value(struct gpio_chip *gc, unsigned int offset,
+ int val)
{
struct pca9532_data *data = gpiochip_get_data(gc);
struct pca9532_led *led = &data->leds[offset];
@@ -329,6 +330,8 @@ static void pca9532_gpio_set_value(struct gpio_chip *gc, unsigned offset, int va
led->state = PCA9532_OFF;
pca9532_setled(led);
+
+ return 0;
}
static int pca9532_gpio_get_value(struct gpio_chip *gc, unsigned offset)
@@ -351,9 +354,7 @@ static int pca9532_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
static int pca9532_gpio_direction_output(struct gpio_chip *gc, unsigned offset, int val)
{
- pca9532_gpio_set_value(gc, offset, val);
-
- return 0;
+ return pca9532_gpio_set_value(gc, offset, val);
}
#endif /* CONFIG_LEDS_PCA9532_GPIO */
@@ -472,7 +473,7 @@ static int pca9532_configure(struct i2c_client *client,
data->gpio.label = "gpio-pca9532";
data->gpio.direction_input = pca9532_gpio_direction_input;
data->gpio.direction_output = pca9532_gpio_direction_output;
- data->gpio.set = pca9532_gpio_set_value;
+ data->gpio.set_rv = pca9532_gpio_set_value;
data->gpio.get = pca9532_gpio_get_value;
data->gpio.request = pca9532_gpio_request_pin;
data->gpio.can_sleep = 1;
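
pca9532 here (and pca955x/tca6507 below) moves from gpio_chip's void .set callback to the int-returning .set_rv, so bus errors can propagate back to gpiolib instead of being silently dropped. A minimal sketch of the callback shape, with hypothetical register names:

	#include <linux/bits.h>
	#include <linux/gpio/driver.h>
	#include <linux/regmap.h>

	#define EX_OUT_REG 0x01		/* hypothetical output register */

	static int example_gpio_set(struct gpio_chip *gc, unsigned int offset,
				    int val)
	{
		struct regmap *map = gpiochip_get_data(gc);

		/* any I2C/regmap failure is now reported to the caller */
		return regmap_update_bits(map, EX_OUT_REG, BIT(offset),
					  val ? BIT(offset) : 0);
	}

	/* wired up as: chip->gc.set_rv = example_gpio_set; */
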
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index e9cfde9fe4b1..42fe056b1c74 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -73,7 +73,7 @@ enum pca955x_type {
};
struct pca955x_chipdef {
- int bits;
+ u8 bits;
u8 slv_addr; /* 7-bit slave address mask */
int slv_addr_shift; /* Number of bits to ignore */
int blink_div; /* PSC divider */
@@ -142,13 +142,13 @@ struct pca955x_platform_data {
};
/* 8 bits per input register */
-static inline int pca955x_num_input_regs(int bits)
+static inline u8 pca955x_num_input_regs(u8 bits)
{
return (bits + 7) / 8;
}
/* 4 bits per LED selector register */
-static inline int pca955x_num_led_regs(int bits)
+static inline u8 pca955x_num_led_regs(u8 bits)
{
return (bits + 3) / 4;
}
@@ -495,10 +495,10 @@ static int pca955x_set_value(struct gpio_chip *gc, unsigned int offset,
return pca955x_led_set(&led->led_cdev, PCA955X_GPIO_LOW);
}
-static void pca955x_gpio_set_value(struct gpio_chip *gc, unsigned int offset,
- int val)
+static int pca955x_gpio_set_value(struct gpio_chip *gc, unsigned int offset,
+ int val)
{
- pca955x_set_value(gc, offset, val);
+ return pca955x_set_value(gc, offset, val);
}
static int pca955x_gpio_get_value(struct gpio_chip *gc, unsigned int offset)
@@ -581,14 +581,14 @@ static int pca955x_probe(struct i2c_client *client)
struct led_classdev *led;
struct led_init_data init_data;
struct i2c_adapter *adapter;
- int i, bit, err, nls, reg;
+ u8 i, nls, psc0;
u8 ls1[4];
u8 ls2[4];
struct pca955x_platform_data *pdata;
- u8 psc0;
bool keep_psc0 = false;
bool set_default_label = false;
char default_label[8];
+ int bit, err, reg;
chip = i2c_get_match_data(client);
if (!chip)
@@ -610,16 +610,15 @@ static int pca955x_probe(struct i2c_client *client)
return -ENODEV;
}
- dev_info(&client->dev, "leds-pca955x: Using %s %d-bit LED driver at "
- "slave address 0x%02x\n", client->name, chip->bits,
- client->addr);
+ dev_info(&client->dev, "Using %s %u-bit LED driver at slave address 0x%02x\n",
+ client->name, chip->bits, client->addr);
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -EIO;
if (pdata->num_leds != chip->bits) {
dev_err(&client->dev,
- "board info claims %d LEDs on a %d-bit chip\n",
+ "board info claims %d LEDs on a %u-bit chip\n",
pdata->num_leds, chip->bits);
return -ENODEV;
}
@@ -694,8 +693,7 @@ static int pca955x_probe(struct i2c_client *client)
}
if (set_default_label) {
- snprintf(default_label, sizeof(default_label),
- "%d", i);
+ snprintf(default_label, sizeof(default_label), "%u", i);
init_data.default_label = default_label;
} else {
init_data.default_label = NULL;
@@ -739,7 +737,7 @@ static int pca955x_probe(struct i2c_client *client)
pca955x->gpio.label = "gpio-pca955x";
pca955x->gpio.direction_input = pca955x_gpio_direction_input;
pca955x->gpio.direction_output = pca955x_gpio_direction_output;
- pca955x->gpio.set = pca955x_gpio_set_value;
+ pca955x->gpio.set_rv = pca955x_gpio_set_value;
pca955x->gpio.get = pca955x_gpio_get_value;
pca955x->gpio.request = pca955x_gpio_request_pin;
pca955x->gpio.free = pca955x_gpio_free_pin;
diff --git a/drivers/leds/leds-pca995x.c b/drivers/leds/leds-pca995x.c
index 11c7bb69573e..6ad06ce2bf64 100644
--- a/drivers/leds/leds-pca995x.c
+++ b/drivers/leds/leds-pca995x.c
@@ -197,7 +197,7 @@ MODULE_DEVICE_TABLE(i2c, pca995x_id);
static const struct of_device_id pca995x_of_match[] = {
{ .compatible = "nxp,pca9952", .data = &pca9952_chipdef },
- { .compatible = "nxp,pca9955b", . data = &pca9955b_chipdef },
+ { .compatible = "nxp,pca9955b", .data = &pca9955b_chipdef },
{ .compatible = "nxp,pca9956b", .data = &pca9956b_chipdef },
{},
};
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
index acbd8169723c..89c165c8ee9c 100644
--- a/drivers/leds/leds-tca6507.c
+++ b/drivers/leds/leds-tca6507.c
@@ -588,8 +588,8 @@ static int tca6507_blink_set(struct led_classdev *led_cdev,
}
#ifdef CONFIG_GPIOLIB
-static void tca6507_gpio_set_value(struct gpio_chip *gc,
- unsigned offset, int val)
+static int tca6507_gpio_set_value(struct gpio_chip *gc, unsigned int offset,
+ int val)
{
struct tca6507_chip *tca = gpiochip_get_data(gc);
unsigned long flags;
@@ -604,13 +604,14 @@ static void tca6507_gpio_set_value(struct gpio_chip *gc,
spin_unlock_irqrestore(&tca->lock, flags);
if (tca->reg_set)
schedule_work(&tca->work);
+
+ return 0;
}
static int tca6507_gpio_direction_output(struct gpio_chip *gc,
unsigned offset, int val)
{
- tca6507_gpio_set_value(gc, offset, val);
- return 0;
+ return tca6507_gpio_set_value(gc, offset, val);
}
static int tca6507_probe_gpios(struct device *dev,
@@ -636,7 +637,7 @@ static int tca6507_probe_gpios(struct device *dev,
tca->gpio.base = -1;
tca->gpio.owner = THIS_MODULE;
tca->gpio.direction_output = tca6507_gpio_direction_output;
- tca->gpio.set = tca6507_gpio_set_value;
+ tca->gpio.set_rv = tca6507_gpio_set_value;
tca->gpio.parent = dev;
err = devm_gpiochip_add_data(dev, &tca->gpio, tca);
if (err) {
diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c
index 4fe1a9c0bc1b..25ee5c1eb820 100644
--- a/drivers/leds/leds-turris-omnia.c
+++ b/drivers/leds/leds-turris-omnia.c
@@ -361,7 +361,7 @@ static DEVICE_ATTR_RW(gamma_correction);
static struct attribute *omnia_led_controller_attrs[] = {
&dev_attr_brightness.attr,
&dev_attr_gamma_correction.attr,
- NULL,
+ NULL
};
ATTRIBUTE_GROUPS(omnia_led_controller);
@@ -527,7 +527,7 @@ static void omnia_leds_remove(struct i2c_client *client)
static const struct of_device_id of_omnia_leds_match[] = {
{ .compatible = "cznic,turris-omnia-leds", },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, of_omnia_leds_match);
diff --git a/drivers/leds/rgb/leds-mt6370-rgb.c b/drivers/leds/rgb/leds-mt6370-rgb.c
index ebd3ba878dd5..c5927d0eb830 100644
--- a/drivers/leds/rgb/leds-mt6370-rgb.c
+++ b/drivers/leds/rgb/leds-mt6370-rgb.c
@@ -199,17 +199,17 @@ static const struct reg_field mt6372_reg_fields[F_MAX_FIELDS] = {
/* Current unit: microamp, time unit: millisecond */
static const struct linear_range common_led_ranges[R_MAX_RANGES] = {
- [R_LED123_CURR] = { 4000, 1, 6, 4000 },
- [R_LED4_CURR] = { 2000, 1, 3, 2000 },
- [R_LED_TRFON] = { 125, 0, 15, 200 },
- [R_LED_TOFF] = { 250, 0, 15, 400 },
+ [R_LED123_CURR] = LINEAR_RANGE(4000, 1, 6, 4000),
+ [R_LED4_CURR] = LINEAR_RANGE(2000, 1, 3, 2000),
+ [R_LED_TRFON] = LINEAR_RANGE(125, 0, 15, 200),
+ [R_LED_TOFF] = LINEAR_RANGE(250, 0, 15, 400),
};
static const struct linear_range mt6372_led_ranges[R_MAX_RANGES] = {
- [R_LED123_CURR] = { 2000, 1, 14, 2000 },
- [R_LED4_CURR] = { 2000, 1, 14, 2000 },
- [R_LED_TRFON] = { 125, 0, 15, 250 },
- [R_LED_TOFF] = { 250, 0, 15, 500 },
+ [R_LED123_CURR] = LINEAR_RANGE(2000, 1, 14, 2000),
+ [R_LED4_CURR] = LINEAR_RANGE(2000, 1, 14, 2000),
+ [R_LED_TRFON] = LINEAR_RANGE(125, 0, 15, 250),
+ [R_LED_TOFF] = LINEAR_RANGE(250, 0, 15, 500),
};
static const unsigned int common_tfreqs[] = {
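
The positional struct initializers are replaced by the LINEAR_RANGE(min, min_sel, max_sel, step) macro from <linux/linear_range.h>, which names the fields and guards against reordering. A small sketch of how such a range decodes a selector, reusing the LED current table above:

	#include <linux/linear_range.h>

	/* 4000 uA at selector 1, +4000 uA per step, up to selector 6 */
	static const struct linear_range led_curr =
		LINEAR_RANGE(4000, 1, 6, 4000);

	static int example_selector_to_uA(unsigned int sel, unsigned int *uA)
	{
		/* sel = 3 yields 4000 + (3 - 1) * 4000 = 12000 uA */
		return linear_range_get_value(&led_curr, sel, uA);
	}
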
diff --git a/drivers/leds/rgb/leds-ncp5623.c b/drivers/leds/rgb/leds-ncp5623.c
index f18156683375..7c7d44623a9e 100644
--- a/drivers/leds/rgb/leds-ncp5623.c
+++ b/drivers/leds/rgb/leds-ncp5623.c
@@ -155,9 +155,9 @@ static int ncp5623_probe(struct i2c_client *client)
struct device *dev = &client->dev;
struct fwnode_handle *mc_node, *led_node;
struct led_init_data init_data = { };
- int num_subleds = 0;
struct ncp5623 *ncp;
struct mc_subled *subled_info;
+ unsigned int num_subleds;
u32 color_index;
u32 reg;
int ret;
@@ -172,8 +172,7 @@ static int ncp5623_probe(struct i2c_client *client)
if (!mc_node)
return -EINVAL;
- fwnode_for_each_child_node(mc_node, led_node)
- num_subleds++;
+ num_subleds = fwnode_get_child_node_count(mc_node);
subled_info = devm_kcalloc(dev, num_subleds, sizeof(*subled_info), GFP_KERNEL);
if (!subled_info) {
diff --git a/drivers/leds/rgb/leds-pwm-multicolor.c b/drivers/leds/rgb/leds-pwm-multicolor.c
index 1c7705bafdfc..e0d7d3c9215c 100644
--- a/drivers/leds/rgb/leds-pwm-multicolor.c
+++ b/drivers/leds/rgb/leds-pwm-multicolor.c
@@ -107,12 +107,12 @@ release_fwnode:
static int led_pwm_mc_probe(struct platform_device *pdev)
{
- struct fwnode_handle *mcnode, *fwnode;
+ struct fwnode_handle *mcnode;
struct led_init_data init_data = {};
struct led_classdev *cdev;
struct mc_subled *subled;
struct pwm_mc_led *priv;
- int count = 0;
+ unsigned int count;
int ret = 0;
mcnode = device_get_named_child_node(&pdev->dev, "multi-led");
@@ -121,8 +121,7 @@ static int led_pwm_mc_probe(struct platform_device *pdev)
"expected multi-led node\n");
/* count the nodes inside the multi-led node */
- fwnode_for_each_child_node(mcnode, fwnode)
- count++;
+ count = fwnode_get_child_node_count(mcnode);
priv = devm_kzalloc(&pdev->dev, struct_size(priv, leds, count),
GFP_KERNEL);
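
Both multicolor drivers drop their open-coded child-counting loops in favor of fwnode_get_child_node_count(). A minimal sketch of the count-then-allocate pattern they share:

	#include <linux/property.h>
	#include <linux/slab.h>

	static int example_alloc_subleds(struct device *dev,
					 struct fwnode_handle *mc_node,
					 struct mc_subled **out)
	{
		/* count the sub-LED nodes once, then size the array */
		unsigned int n = fwnode_get_child_node_count(mc_node);

		*out = devm_kcalloc(dev, n, sizeof(**out), GFP_KERNEL);
		return *out ? 0 : -ENOMEM;
	}
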
diff --git a/drivers/leds/trigger/ledtrig-backlight.c b/drivers/leds/trigger/ledtrig-backlight.c
index 487577d22cfc..c1f0f5becaee 100644
--- a/drivers/leds/trigger/ledtrig-backlight.c
+++ b/drivers/leds/trigger/ledtrig-backlight.c
@@ -10,7 +10,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
-#include <linux/fb.h>
#include <linux/leds.h>
#include "../leds.h"
@@ -21,29 +20,20 @@ struct bl_trig_notifier {
struct led_classdev *led;
int brightness;
int old_status;
- struct notifier_block notifier;
unsigned invert;
+
+ struct list_head entry;
};
-static int fb_notifier_callback(struct notifier_block *p,
- unsigned long event, void *data)
+static DEFINE_MUTEX(ledtrig_backlight_list_mutex);
+static LIST_HEAD(ledtrig_backlight_list);
+
+static void ledtrig_backlight_notify_blank(struct bl_trig_notifier *n, int new_status)
{
- struct bl_trig_notifier *n = container_of(p,
- struct bl_trig_notifier, notifier);
struct led_classdev *led = n->led;
- struct fb_event *fb_event = data;
- int *blank;
- int new_status;
-
- /* If we aren't interested in this event, skip it immediately ... */
- if (event != FB_EVENT_BLANK)
- return 0;
-
- blank = fb_event->data;
- new_status = *blank ? BLANK : UNBLANK;
if (new_status == n->old_status)
- return 0;
+ return;
if ((n->old_status == UNBLANK) ^ n->invert) {
n->brightness = led->brightness;
@@ -53,9 +43,19 @@ static int fb_notifier_callback(struct notifier_block *p,
}
n->old_status = new_status;
+}
- return 0;
+void ledtrig_backlight_blank(bool blank)
+{
+ struct bl_trig_notifier *n;
+ int new_status = blank ? BLANK : UNBLANK;
+
+ guard(mutex)(&ledtrig_backlight_list_mutex);
+
+ list_for_each_entry(n, &ledtrig_backlight_list, entry)
+ ledtrig_backlight_notify_blank(n, new_status);
}
+EXPORT_SYMBOL(ledtrig_backlight_blank);
static ssize_t bl_trig_invert_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -100,8 +100,6 @@ ATTRIBUTE_GROUPS(bl_trig);
static int bl_trig_activate(struct led_classdev *led)
{
- int ret;
-
struct bl_trig_notifier *n;
n = kzalloc(sizeof(struct bl_trig_notifier), GFP_KERNEL);
@@ -112,11 +110,9 @@ static int bl_trig_activate(struct led_classdev *led)
n->led = led;
n->brightness = led->brightness;
n->old_status = UNBLANK;
- n->notifier.notifier_call = fb_notifier_callback;
- ret = fb_register_client(&n->notifier);
- if (ret)
- dev_err(led->dev, "unable to register backlight trigger\n");
+ guard(mutex)(&ledtrig_backlight_list_mutex);
+ list_add(&n->entry, &ledtrig_backlight_list);
return 0;
}
@@ -125,7 +121,9 @@ static void bl_trig_deactivate(struct led_classdev *led)
{
struct bl_trig_notifier *n = led_get_trigger_data(led);
- fb_unregister_client(&n->notifier);
+ guard(mutex)(&ledtrig_backlight_list_mutex);
+ list_del(&n->entry);
+
kfree(n);
}
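
With the fb notifier gone, the trigger keeps its own mutex-protected list of instances, and blanking is driven by the exported ledtrig_backlight_blank(). A hedged sketch of a call site in a display driver's power path; the surrounding driver, its helper, and the header carrying the declaration are assumptions:

	/* hypothetical display-power helper calling the new export */
	static void example_set_display_power(struct example_display *disp,
					      bool on)
	{
		example_hw_set_power(disp, on);	/* assumed hardware hook */
		ledtrig_backlight_blank(!on);	/* fan out to all trigger users */
	}
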
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index ed52db272f4d..68eeed660a4a 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -36,6 +36,16 @@ config ARM_MHU_V3
that provides different means of transports: supported extensions
will be discovered and possibly managed at probe-time.
+config CV1800_MBOX
+ tristate "cv1800 mailbox"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ help
+ Mailbox driver implementation for Sophgo CV18XX SoCs. This driver
+ can be used to send messages between the different processors in the
+ SoC. Any processor can write data to a channel and set the
+ corresponding register to raise an interrupt to notify another
+ processor; a processor may also send data to itself.
+
config EXYNOS_MBOX
tristate "Exynos Mailbox"
depends on ARCH_EXYNOS || COMPILE_TEST
@@ -191,8 +201,8 @@ config POLARFIRE_SOC_MAILBOX
config MCHP_SBI_IPC_MBOX
tristate "Microchip Inter-processor Communication (IPC) SBI driver"
- depends on RISCV_SBI || COMPILE_TEST
- depends on ARCH_MICROCHIP
+ depends on RISCV_SBI
+ depends on ARCH_MICROCHIP || COMPILE_TEST
help
Mailbox implementation for Microchip devices with an
Inter-process communication (IPC) controller.
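
The new CV1800_MBOX entry above describes a doorbell-plus-data mailbox between the SoC's processors; like the other controllers in this Kconfig, the driver (added below) plugs into the common mailbox framework, so consumers only touch the generic client API. A minimal sketch, assuming a blocking one-shot send of the controller's 8-byte payload:

	#include <linux/err.h>
	#include <linux/mailbox_client.h>

	static int example_send(struct device *dev, u64 payload)
	{
		struct mbox_client cl = {
			.dev = dev,
			.tx_block = true,
			.tx_tout = 100,			/* ms */
		};
		struct mbox_chan *chan;
		int ret;

		chan = mbox_request_channel(&cl, 0);	/* first "mboxes" entry */
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		ret = mbox_send_message(chan, &payload);
		mbox_free_channel(chan);
		return ret < 0 ? ret : 0;
	}
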
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 9a1542b55539..13a3448b3271 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -11,6 +11,8 @@ obj-$(CONFIG_ARM_MHU_V2) += arm_mhuv2.o
obj-$(CONFIG_ARM_MHU_V3) += arm_mhuv3.o
+obj-$(CONFIG_CV1800_MBOX) += cv1800-mailbox.o
+
obj-$(CONFIG_EXYNOS_MBOX) += exynos-mailbox.o
obj-$(CONFIG_IMX_MBOX) += imx-mailbox.o
diff --git a/drivers/mailbox/cv1800-mailbox.c b/drivers/mailbox/cv1800-mailbox.c
new file mode 100644
index 000000000000..4761191acf78
--- /dev/null
+++ b/drivers/mailbox/cv1800-mailbox.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2024 Sophgo Technology Inc.
+ * Copyright (C) 2024 Yuntao Dai <d1581209858@live.com>
+ * Copyright (C) 2025 Junhui Liu <junhui.liu@pigmoral.tech>
+ */
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kfifo.h>
+#include <linux/mailbox_client.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define RECV_CPU 1
+
+#define MAILBOX_MAX_CHAN 8
+#define MAILBOX_MSG_LEN 8
+
+#define MBOX_EN_REG(cpu) (cpu << 2)
+#define MBOX_DONE_REG(cpu) ((cpu << 2) + 2)
+#define MBOX_SET_CLR_REG(cpu) (0x10 + (cpu << 4))
+#define MBOX_SET_INT_REG(cpu) (0x18 + (cpu << 4))
+#define MBOX_SET_REG 0x60
+
+#define MAILBOX_CONTEXT_OFFSET 0x0400
+#define MAILBOX_CONTEXT_SIZE 0x0040
+
+#define MBOX_CONTEXT_BASE_INDEX(base, index) \
+ ((u64 __iomem *)(base + MAILBOX_CONTEXT_OFFSET) + index)
+
+/**
+ * struct cv1800_mbox_chan_priv - cv1800 mailbox channel private data
+ * @idx: index of the channel
+ * @cpu: destination processor the channel sends to
+ */
+struct cv1800_mbox_chan_priv {
+ int idx;
+ int cpu;
+};
+
+struct cv1800_mbox {
+ struct mbox_controller mbox;
+ struct cv1800_mbox_chan_priv priv[MAILBOX_MAX_CHAN];
+ struct mbox_chan chans[MAILBOX_MAX_CHAN];
+ u64 __iomem *content[MAILBOX_MAX_CHAN];
+ void __iomem *mbox_base;
+ int recvid;
+};
+
+static irqreturn_t cv1800_mbox_isr(int irq, void *dev_id)
+{
+ struct cv1800_mbox *mbox = (struct cv1800_mbox *)dev_id;
+ size_t i;
+ u64 msg;
+ int ret = IRQ_NONE;
+
+ for (i = 0; i < MAILBOX_MAX_CHAN; i++) {
+ if (mbox->content[i] && mbox->chans[i].cl) {
+ memcpy_fromio(&msg, mbox->content[i], MAILBOX_MSG_LEN);
+ mbox->content[i] = NULL;
+ mbox_chan_received_data(&mbox->chans[i], (void *)&msg);
+ ret = IRQ_HANDLED;
+ }
+ }
+
+ return ret;
+}
+
+static irqreturn_t cv1800_mbox_irq(int irq, void *dev_id)
+{
+ struct cv1800_mbox *mbox = (struct cv1800_mbox *)dev_id;
+ u8 set, valid;
+ size_t i;
+ int ret = IRQ_NONE;
+
+ set = readb(mbox->mbox_base + MBOX_SET_INT_REG(RECV_CPU));
+
+ if (!set)
+ return ret;
+
+ for (i = 0; i < MAILBOX_MAX_CHAN; i++) {
+ valid = set & BIT(i);
+ if (valid) {
+ mbox->content[i] =
+ MBOX_CONTEXT_BASE_INDEX(mbox->mbox_base, i);
+ writeb(valid, mbox->mbox_base +
+ MBOX_SET_CLR_REG(RECV_CPU));
+ writeb(~valid, mbox->mbox_base + MBOX_EN_REG(RECV_CPU));
+ ret = IRQ_WAKE_THREAD;
+ }
+ }
+
+ return ret;
+}
+
+static int cv1800_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct cv1800_mbox_chan_priv *priv =
+ (struct cv1800_mbox_chan_priv *)chan->con_priv;
+ struct cv1800_mbox *mbox = dev_get_drvdata(chan->mbox->dev);
+ int idx = priv->idx;
+ int cpu = priv->cpu;
+ u8 en, valid;
+
+ memcpy_toio(MBOX_CONTEXT_BASE_INDEX(mbox->mbox_base, idx),
+ data, MAILBOX_MSG_LEN);
+
+ valid = BIT(idx);
+ writeb(valid, mbox->mbox_base + MBOX_SET_CLR_REG(cpu));
+ en = readb(mbox->mbox_base + MBOX_EN_REG(cpu));
+ writeb(en | valid, mbox->mbox_base + MBOX_EN_REG(cpu));
+ writeb(valid, mbox->mbox_base + MBOX_SET_REG);
+
+ return 0;
+}
+
+static bool cv1800_last_tx_done(struct mbox_chan *chan)
+{
+ struct cv1800_mbox_chan_priv *priv =
+ (struct cv1800_mbox_chan_priv *)chan->con_priv;
+ struct cv1800_mbox *mbox = dev_get_drvdata(chan->mbox->dev);
+ u8 en;
+
+ en = readb(mbox->mbox_base + MBOX_EN_REG(priv->cpu));
+
+ return !(en & BIT(priv->idx));
+}
+
+static const struct mbox_chan_ops cv1800_mbox_chan_ops = {
+ .send_data = cv1800_mbox_send_data,
+ .last_tx_done = cv1800_last_tx_done,
+};
+
+static struct mbox_chan *cv1800_mbox_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *spec)
+{
+ struct cv1800_mbox_chan_priv *priv;
+
+ int idx = spec->args[0];
+ int cpu = spec->args[1];
+
+ if (idx >= mbox->num_chans)
+ return ERR_PTR(-EINVAL);
+
+ priv = mbox->chans[idx].con_priv;
+ priv->cpu = cpu;
+
+ return &mbox->chans[idx];
+}
+
+static const struct of_device_id cv1800_mbox_of_match[] = {
+ { .compatible = "sophgo,cv1800b-mailbox", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cv1800_mbox_of_match);
+
+static int cv1800_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cv1800_mbox *mb;
+ int irq, idx, err;
+
+ mb = devm_kzalloc(dev, sizeof(*mb), GFP_KERNEL);
+ if (!mb)
+ return -ENOMEM;
+
+ mb->mbox_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mb->mbox_base))
+ return dev_err_probe(dev, PTR_ERR(mb->mbox_base),
+ "Failed to map resource\n");
+
+ mb->mbox.dev = dev;
+ mb->mbox.chans = mb->chans;
+ mb->mbox.txdone_poll = true;
+ mb->mbox.ops = &cv1800_mbox_chan_ops;
+ mb->mbox.num_chans = MAILBOX_MAX_CHAN;
+ mb->mbox.of_xlate = cv1800_mbox_xlate;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_threaded_irq(dev, irq, cv1800_mbox_irq,
+ cv1800_mbox_isr, IRQF_ONESHOT,
+ dev_name(&pdev->dev), mb);
+ if (err < 0)
+ return dev_err_probe(dev, err, "Failed to register irq\n");
+
+ for (idx = 0; idx < MAILBOX_MAX_CHAN; idx++) {
+ mb->priv[idx].idx = idx;
+ mb->mbox.chans[idx].con_priv = &mb->priv[idx];
+ }
+
+ platform_set_drvdata(pdev, mb);
+
+ err = devm_mbox_controller_register(dev, &mb->mbox);
+ if (err)
+ return dev_err_probe(dev, err, "Failed to register mailbox\n");
+
+ return 0;
+}
+
+static struct platform_driver cv1800_mbox_driver = {
+ .driver = {
+ .name = "cv1800-mbox",
+ .of_match_table = cv1800_mbox_of_match,
+ },
+ .probe = cv1800_mbox_probe,
+};
+
+module_platform_driver(cv1800_mbox_driver);
+
+MODULE_DESCRIPTION("cv1800 mailbox driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index 6ef8338add0d..6778afc64a04 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -226,7 +226,7 @@ static int imx_mu_generic_tx(struct imx_mu_priv *priv,
{
u32 *arg = data;
u32 val;
- int ret;
+ int ret, count;
switch (cp->type) {
case IMX_MU_TYPE_TX:
@@ -240,11 +240,20 @@ static int imx_mu_generic_tx(struct imx_mu_priv *priv,
case IMX_MU_TYPE_TXDB_V2:
imx_mu_write(priv, IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx),
priv->dcfg->xCR[IMX_MU_GCR]);
- ret = readl_poll_timeout(priv->base + priv->dcfg->xCR[IMX_MU_GCR], val,
- !(val & IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx)),
- 0, 1000);
- if (ret)
- dev_warn_ratelimited(priv->dev, "channel type: %d failure\n", cp->type);
+ ret = -ETIMEDOUT;
+ count = 0;
+ while (ret && (count < 10)) {
+ ret = readl_poll_timeout(priv->base + priv->dcfg->xCR[IMX_MU_GCR], val,
+ !(val & IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx)),
+ 0, 10000);
+
+ if (ret) {
+ dev_warn_ratelimited(priv->dev,
+ "channel type: %d timed out, retry %d\n",
+ cp->type, ++count);
+ }
+ }
break;
default:
dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
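
Instead of a single 1 ms readl_poll_timeout() that could spuriously warn, the TXDB_V2 path now retries up to ten 10 ms polls before giving up. A condensed sketch of the bounded-retry pattern, with hypothetical register/mask names:

	#include <linux/iopoll.h>
	#include <linux/printk.h>

	static int example_wait_clear(void __iomem *reg, u32 mask)
	{
		int ret = -ETIMEDOUT, count = 0;
		u32 val;

		while (ret && count < 10) {
			/* up to 10 ms per attempt, busy-waiting between reads */
			ret = readl_poll_timeout(reg, val, !(val & mask),
						 0, 10000);
			if (ret)
				pr_warn_ratelimited("timeout, retry %d\n",
						    ++count);
		}
		return ret;
	}
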
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 0593b4d03685..5cd8ae222073 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -6,6 +6,7 @@
* Author: Jassi Brar <jassisinghbrar@gmail.com>
*/
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -24,15 +25,12 @@ static DEFINE_MUTEX(con_mutex);
static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
{
int idx;
- unsigned long flags;
- spin_lock_irqsave(&chan->lock, flags);
+ guard(spinlock_irqsave)(&chan->lock);
/* See if there is any space left */
- if (chan->msg_count == MBOX_TX_QUEUE_LEN) {
- spin_unlock_irqrestore(&chan->lock, flags);
+ if (chan->msg_count == MBOX_TX_QUEUE_LEN)
return -ENOBUFS;
- }
idx = chan->msg_free;
chan->msg_data[idx] = mssg;
@@ -43,60 +41,53 @@ static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
else
chan->msg_free++;
- spin_unlock_irqrestore(&chan->lock, flags);
-
return idx;
}
static void msg_submit(struct mbox_chan *chan)
{
unsigned count, idx;
- unsigned long flags;
void *data;
int err = -EBUSY;
- spin_lock_irqsave(&chan->lock, flags);
-
- if (!chan->msg_count || chan->active_req)
- goto exit;
+ scoped_guard(spinlock_irqsave, &chan->lock) {
+ if (!chan->msg_count || chan->active_req)
+ break;
- count = chan->msg_count;
- idx = chan->msg_free;
- if (idx >= count)
- idx -= count;
- else
- idx += MBOX_TX_QUEUE_LEN - count;
+ count = chan->msg_count;
+ idx = chan->msg_free;
+ if (idx >= count)
+ idx -= count;
+ else
+ idx += MBOX_TX_QUEUE_LEN - count;
- data = chan->msg_data[idx];
+ data = chan->msg_data[idx];
- if (chan->cl->tx_prepare)
- chan->cl->tx_prepare(chan->cl, data);
- /* Try to submit a message to the MBOX controller */
- err = chan->mbox->ops->send_data(chan, data);
- if (!err) {
- chan->active_req = data;
- chan->msg_count--;
+ if (chan->cl->tx_prepare)
+ chan->cl->tx_prepare(chan->cl, data);
+ /* Try to submit a message to the MBOX controller */
+ err = chan->mbox->ops->send_data(chan, data);
+ if (!err) {
+ chan->active_req = data;
+ chan->msg_count--;
+ }
}
-exit:
- spin_unlock_irqrestore(&chan->lock, flags);
if (!err && (chan->txdone_method & TXDONE_BY_POLL)) {
/* kick start the timer immediately to avoid delays */
- spin_lock_irqsave(&chan->mbox->poll_hrt_lock, flags);
- hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
- spin_unlock_irqrestore(&chan->mbox->poll_hrt_lock, flags);
+ scoped_guard(spinlock_irqsave, &chan->mbox->poll_hrt_lock)
+ hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
}
}
static void tx_tick(struct mbox_chan *chan, int r)
{
- unsigned long flags;
void *mssg;
- spin_lock_irqsave(&chan->lock, flags);
- mssg = chan->active_req;
- chan->active_req = NULL;
- spin_unlock_irqrestore(&chan->lock, flags);
+ scoped_guard(spinlock_irqsave, &chan->lock) {
+ mssg = chan->active_req;
+ chan->active_req = NULL;
+ }
/* Submit next message */
msg_submit(chan);
@@ -118,7 +109,6 @@ static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
container_of(hrtimer, struct mbox_controller, poll_hrt);
bool txdone, resched = false;
int i;
- unsigned long flags;
for (i = 0; i < mbox->num_chans; i++) {
struct mbox_chan *chan = &mbox->chans[i];
@@ -133,10 +123,10 @@ static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
}
if (resched) {
- spin_lock_irqsave(&mbox->poll_hrt_lock, flags);
- if (!hrtimer_is_queued(hrtimer))
- hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period));
- spin_unlock_irqrestore(&mbox->poll_hrt_lock, flags);
+ scoped_guard(spinlock_irqsave, &mbox->poll_hrt_lock) {
+ if (!hrtimer_is_queued(hrtimer))
+ hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period));
+ }
return HRTIMER_RESTART;
}
@@ -318,25 +308,23 @@ EXPORT_SYMBOL_GPL(mbox_flush);
static int __mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
{
struct device *dev = cl->dev;
- unsigned long flags;
int ret;
if (chan->cl || !try_module_get(chan->mbox->dev->driver->owner)) {
- dev_dbg(dev, "%s: mailbox not free\n", __func__);
+ dev_err(dev, "%s: mailbox not free\n", __func__);
return -EBUSY;
}
- spin_lock_irqsave(&chan->lock, flags);
- chan->msg_free = 0;
- chan->msg_count = 0;
- chan->active_req = NULL;
- chan->cl = cl;
- init_completion(&chan->tx_complete);
-
- if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
- chan->txdone_method = TXDONE_BY_ACK;
+ scoped_guard(spinlock_irqsave, &chan->lock) {
+ chan->msg_free = 0;
+ chan->msg_count = 0;
+ chan->active_req = NULL;
+ chan->cl = cl;
+ init_completion(&chan->tx_complete);
- spin_unlock_irqrestore(&chan->lock, flags);
+ if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
+ chan->txdone_method = TXDONE_BY_ACK;
+ }
if (chan->mbox->ops->startup) {
ret = chan->mbox->ops->startup(chan);
@@ -370,13 +358,9 @@ static int __mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
*/
int mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
{
- int ret;
-
- mutex_lock(&con_mutex);
- ret = __mbox_bind_client(chan, cl);
- mutex_unlock(&con_mutex);
+ guard(mutex)(&con_mutex);
- return ret;
+ return __mbox_bind_client(chan, cl);
}
EXPORT_SYMBOL_GPL(mbox_bind_client);
@@ -413,32 +397,29 @@ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
ret = of_parse_phandle_with_args(dev->of_node, "mboxes", "#mbox-cells",
index, &spec);
if (ret) {
- dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__);
+ dev_err(dev, "%s: can't parse \"mboxes\" property\n", __func__);
return ERR_PTR(ret);
}
- mutex_lock(&con_mutex);
+ scoped_guard(mutex, &con_mutex) {
+ chan = ERR_PTR(-EPROBE_DEFER);
+ list_for_each_entry(mbox, &mbox_cons, node)
+ if (mbox->dev->of_node == spec.np) {
+ chan = mbox->of_xlate(mbox, &spec);
+ if (!IS_ERR(chan))
+ break;
+ }
- chan = ERR_PTR(-EPROBE_DEFER);
- list_for_each_entry(mbox, &mbox_cons, node)
- if (mbox->dev->of_node == spec.np) {
- chan = mbox->of_xlate(mbox, &spec);
- if (!IS_ERR(chan))
- break;
- }
+ of_node_put(spec.np);
- of_node_put(spec.np);
+ if (IS_ERR(chan))
+ return chan;
- if (IS_ERR(chan)) {
- mutex_unlock(&con_mutex);
- return chan;
+ ret = __mbox_bind_client(chan, cl);
+ if (ret)
+ chan = ERR_PTR(ret);
}
- ret = __mbox_bind_client(chan, cl);
- if (ret)
- chan = ERR_PTR(ret);
-
- mutex_unlock(&con_mutex);
return chan;
}
EXPORT_SYMBOL_GPL(mbox_request_channel);
@@ -458,7 +439,7 @@ struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
if (index < 0) {
dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
__func__, name);
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(index);
}
return mbox_request_channel(cl, index);
}
@@ -471,8 +452,6 @@ EXPORT_SYMBOL_GPL(mbox_request_channel_byname);
*/
void mbox_free_channel(struct mbox_chan *chan)
{
- unsigned long flags;
-
if (!chan || !chan->cl)
return;
@@ -480,14 +459,14 @@ void mbox_free_channel(struct mbox_chan *chan)
chan->mbox->ops->shutdown(chan);
/* The queued TX requests are simply aborted, no callbacks are made */
- spin_lock_irqsave(&chan->lock, flags);
- chan->cl = NULL;
- chan->active_req = NULL;
- if (chan->txdone_method == TXDONE_BY_ACK)
- chan->txdone_method = TXDONE_BY_POLL;
+ scoped_guard(spinlock_irqsave, &chan->lock) {
+ chan->cl = NULL;
+ chan->active_req = NULL;
+ if (chan->txdone_method == TXDONE_BY_ACK)
+ chan->txdone_method = TXDONE_BY_POLL;
+ }
module_put(chan->mbox->dev->driver->owner);
- spin_unlock_irqrestore(&chan->lock, flags);
}
EXPORT_SYMBOL_GPL(mbox_free_channel);
@@ -547,9 +526,8 @@ int mbox_controller_register(struct mbox_controller *mbox)
if (!mbox->of_xlate)
mbox->of_xlate = of_mbox_index_xlate;
- mutex_lock(&con_mutex);
- list_add_tail(&mbox->node, &mbox_cons);
- mutex_unlock(&con_mutex);
+ scoped_guard(mutex, &con_mutex)
+ list_add_tail(&mbox->node, &mbox_cons);
return 0;
}
@@ -566,17 +544,15 @@ void mbox_controller_unregister(struct mbox_controller *mbox)
if (!mbox)
return;
- mutex_lock(&con_mutex);
-
- list_del(&mbox->node);
-
- for (i = 0; i < mbox->num_chans; i++)
- mbox_free_channel(&mbox->chans[i]);
+ scoped_guard(mutex, &con_mutex) {
+ list_del(&mbox->node);
- if (mbox->txdone_poll)
- hrtimer_cancel(&mbox->poll_hrt);
+ for (i = 0; i < mbox->num_chans; i++)
+ mbox_free_channel(&mbox->chans[i]);
- mutex_unlock(&con_mutex);
+ if (mbox->txdone_poll)
+ hrtimer_cancel(&mbox->poll_hrt);
+ }
}
EXPORT_SYMBOL_GPL(mbox_controller_unregister);
@@ -587,16 +563,6 @@ static void __devm_mbox_controller_unregister(struct device *dev, void *res)
mbox_controller_unregister(*mbox);
}
-static int devm_mbox_controller_match(struct device *dev, void *res, void *data)
-{
- struct mbox_controller **mbox = res;
-
- if (WARN_ON(!mbox || !*mbox))
- return 0;
-
- return *mbox == data;
-}
-
/**
* devm_mbox_controller_register() - managed mbox_controller_register()
* @dev: device owning the mailbox controller being registered
@@ -632,20 +598,3 @@ int devm_mbox_controller_register(struct device *dev,
return 0;
}
EXPORT_SYMBOL_GPL(devm_mbox_controller_register);
-
-/**
- * devm_mbox_controller_unregister() - managed mbox_controller_unregister()
- * @dev: device owning the mailbox controller being unregistered
- * @mbox: mailbox controller being unregistered
- *
- * This function unregisters the mailbox controller and removes the device-
- * managed resource that was set up to automatically unregister the mailbox
- * controller on driver probe failure or driver removal. It's typically not
- * necessary to call this function.
- */
-void devm_mbox_controller_unregister(struct device *dev, struct mbox_controller *mbox)
-{
- WARN_ON(devres_release(dev, __devm_mbox_controller_unregister,
- devm_mbox_controller_match, mbox));
-}
-EXPORT_SYMBOL_GPL(devm_mbox_controller_unregister);
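
mailbox.c is converted wholesale to <linux/cleanup.h>: guard() where the lock spans the whole function, scoped_guard() where it covers only part of it. Because scoped_guard() expands to a for-loop, a `break` leaves the block and releases the lock, which is how the msg_submit() rewrite above exits early. A minimal sketch:

	#include <linux/cleanup.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);
	static int pending;

	static void example_take_one(void)
	{
		scoped_guard(spinlock_irqsave, &example_lock) {
			if (!pending)
				break;	/* leaves the block, dropping the lock */
			pending--;
		}
		/* lock is released here either way */
	}
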
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index d186865b8dce..ab4e8d1954a1 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -92,18 +92,6 @@ struct gce_plat {
u32 gce_num;
};
-static void cmdq_sw_ddr_enable(struct cmdq *cmdq, bool enable)
-{
- WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
-
- if (enable)
- writel(GCE_DDR_EN | GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);
- else
- writel(GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);
-
- clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
-}
-
u8 cmdq_get_shift_pa(struct mbox_chan *chan)
{
struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);
@@ -112,6 +100,19 @@ u8 cmdq_get_shift_pa(struct mbox_chan *chan)
}
EXPORT_SYMBOL(cmdq_get_shift_pa);
+static void cmdq_gctl_value_toggle(struct cmdq *cmdq, bool ddr_enable)
+{
+ u32 val = cmdq->pdata->control_by_sw ? GCE_CTRL_BY_SW : 0;
+
+ if (!cmdq->pdata->control_by_sw && !cmdq->pdata->sw_ddr_en)
+ return;
+
+ if (cmdq->pdata->sw_ddr_en && ddr_enable)
+ val |= GCE_DDR_EN;
+
+ writel(val, cmdq->base + GCE_GCTL_VALUE);
+}
+
static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread)
{
u32 status;
@@ -140,16 +141,10 @@ static void cmdq_thread_resume(struct cmdq_thread *thread)
static void cmdq_init(struct cmdq *cmdq)
{
int i;
- u32 gctl_regval = 0;
WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
- if (cmdq->pdata->control_by_sw)
- gctl_regval = GCE_CTRL_BY_SW;
- if (cmdq->pdata->sw_ddr_en)
- gctl_regval |= GCE_DDR_EN;
- if (gctl_regval)
- writel(gctl_regval, cmdq->base + GCE_GCTL_VALUE);
+ cmdq_gctl_value_toggle(cmdq, true);
writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
for (i = 0; i <= CMDQ_MAX_EVENT; i++)
@@ -315,14 +310,21 @@ static irqreturn_t cmdq_irq_handler(int irq, void *dev)
static int cmdq_runtime_resume(struct device *dev)
{
struct cmdq *cmdq = dev_get_drvdata(dev);
+ int ret;
- return clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks);
+ ret = clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks);
+ if (ret)
+ return ret;
+
+ cmdq_gctl_value_toggle(cmdq, true);
+ return 0;
}
static int cmdq_runtime_suspend(struct device *dev)
{
struct cmdq *cmdq = dev_get_drvdata(dev);
+ cmdq_gctl_value_toggle(cmdq, false);
clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
return 0;
}
@@ -347,9 +349,6 @@ static int cmdq_suspend(struct device *dev)
if (task_running)
dev_warn(dev, "exist running task(s) in suspend\n");
- if (cmdq->pdata->sw_ddr_en)
- cmdq_sw_ddr_enable(cmdq, false);
-
return pm_runtime_force_suspend(dev);
}
@@ -360,9 +359,6 @@ static int cmdq_resume(struct device *dev)
WARN_ON(pm_runtime_force_resume(dev));
cmdq->suspended = false;
- if (cmdq->pdata->sw_ddr_en)
- cmdq_sw_ddr_enable(cmdq, true);
-
return 0;
}
@@ -370,9 +366,6 @@ static void cmdq_remove(struct platform_device *pdev)
{
struct cmdq *cmdq = platform_get_drvdata(pdev);
- if (cmdq->pdata->sw_ddr_en)
- cmdq_sw_ddr_enable(cmdq, false);
-
if (!IS_ENABLED(CONFIG_PM))
cmdq_runtime_suspend(&pdev->dev);
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
index 11c41e935a36..8b24ec0fa191 100644
--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -116,10 +116,18 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
}
if (apcs_data->clk_name) {
- apcs->clk = platform_device_register_data(&pdev->dev,
- apcs_data->clk_name,
- PLATFORM_DEVID_AUTO,
- NULL, 0);
+ struct device_node *np = of_get_child_by_name(pdev->dev.of_node,
+ "clock-controller");
+ struct platform_device_info pdevinfo = {
+ .parent = &pdev->dev,
+ .name = apcs_data->clk_name,
+ .id = PLATFORM_DEVID_AUTO,
+ .fwnode = of_fwnode_handle(np) ?: pdev->dev.fwnode,
+ .of_node_reused = !np,
+ };
+
+ apcs->clk = platform_device_register_full(&pdevinfo);
+ of_node_put(np);
if (IS_ERR(apcs->clk))
dev_err(&pdev->dev, "failed to register APCS clk\n");
}
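
The qcom-apcs change switches from platform_device_register_data() to platform_device_register_full() so the clock child can be handed its own "clock-controller" DT node, falling back to reusing the parent's fwnode when the node is absent. A minimal sketch of the struct-based registration, with hypothetical names:

	#include <linux/platform_device.h>

	static struct platform_device *
	example_spawn_child(struct device *parent, struct fwnode_handle *fw)
	{
		struct platform_device_info info = {
			.parent = parent,
			.name = "example-clk",	/* hypothetical child driver */
			.id = PLATFORM_DEVID_AUTO,
			.fwnode = fw,		/* child gets its own node */
		};

		return platform_device_register_full(&info);
	}
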
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index d098e75e3461..ec84ba5e93e5 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -41,16 +41,6 @@
#define DM_BUFIO_LOW_WATERMARK_RATIO 16
/*
- * Check buffer ages in this interval (seconds)
- */
-#define DM_BUFIO_WORK_TIMER_SECS 30
-
-/*
- * Free buffers when they are older than this (seconds)
- */
-#define DM_BUFIO_DEFAULT_AGE_SECS 300
-
-/*
* The nr of bytes of cached data to keep around.
*/
#define DM_BUFIO_DEFAULT_RETAIN_BYTES (256 * 1024)
@@ -1057,10 +1047,8 @@ static unsigned long dm_bufio_cache_size_latch;
static DEFINE_SPINLOCK(global_spinlock);
-/*
- * Buffers are freed after this timeout
- */
-static unsigned int dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
+static unsigned int dm_bufio_max_age; /* No longer does anything */
+
static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
static unsigned long dm_bufio_peak_allocated;
@@ -1088,7 +1076,6 @@ static LIST_HEAD(dm_bufio_all_clients);
static DEFINE_MUTEX(dm_bufio_clients_lock);
static struct workqueue_struct *dm_bufio_wq;
-static struct delayed_work dm_bufio_cleanup_old_work;
static struct work_struct dm_bufio_replacement_work;
@@ -2680,130 +2667,6 @@ EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
/*--------------------------------------------------------------*/
-static unsigned int get_max_age_hz(void)
-{
- unsigned int max_age = READ_ONCE(dm_bufio_max_age);
-
- if (max_age > UINT_MAX / HZ)
- max_age = UINT_MAX / HZ;
-
- return max_age * HZ;
-}
-
-static bool older_than(struct dm_buffer *b, unsigned long age_hz)
-{
- return time_after_eq(jiffies, READ_ONCE(b->last_accessed) + age_hz);
-}
-
-struct evict_params {
- gfp_t gfp;
- unsigned long age_hz;
-
- /*
- * This gets updated with the largest last_accessed (ie. most
- * recently used) of the evicted buffers. It will not be reinitialised
- * by __evict_many(), so you can use it across multiple invocations.
- */
- unsigned long last_accessed;
-};
-
-/*
- * We may not be able to evict this buffer if IO pending or the client
- * is still using it.
- *
- * And if GFP_NOFS is used, we must not do any I/O because we hold
- * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
- * rerouted to different bufio client.
- */
-static enum evict_result select_for_evict(struct dm_buffer *b, void *context)
-{
- struct evict_params *params = context;
-
- if (!(params->gfp & __GFP_FS) ||
- (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) {
- if (test_bit_acquire(B_READING, &b->state) ||
- test_bit(B_WRITING, &b->state) ||
- test_bit(B_DIRTY, &b->state))
- return ER_DONT_EVICT;
- }
-
- return older_than(b, params->age_hz) ? ER_EVICT : ER_STOP;
-}
-
-static unsigned long __evict_many(struct dm_bufio_client *c,
- struct evict_params *params,
- int list_mode, unsigned long max_count)
-{
- unsigned long count;
- unsigned long last_accessed;
- struct dm_buffer *b;
-
- for (count = 0; count < max_count; count++) {
- b = cache_evict(&c->cache, list_mode, select_for_evict, params);
- if (!b)
- break;
-
- last_accessed = READ_ONCE(b->last_accessed);
- if (time_after_eq(params->last_accessed, last_accessed))
- params->last_accessed = last_accessed;
-
- __make_buffer_clean(b);
- __free_buffer_wake(b);
-
- cond_resched();
- }
-
- return count;
-}
-
-static void evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
-{
- struct evict_params params = {.gfp = 0, .age_hz = age_hz, .last_accessed = 0};
- unsigned long retain = get_retain_buffers(c);
- unsigned long count;
- LIST_HEAD(write_list);
-
- dm_bufio_lock(c);
-
- __check_watermark(c, &write_list);
- if (unlikely(!list_empty(&write_list))) {
- dm_bufio_unlock(c);
- __flush_write_list(&write_list);
- dm_bufio_lock(c);
- }
-
- count = cache_total(&c->cache);
- if (count > retain)
- __evict_many(c, &params, LIST_CLEAN, count - retain);
-
- dm_bufio_unlock(c);
-}
-
-static void cleanup_old_buffers(void)
-{
- unsigned long max_age_hz = get_max_age_hz();
- struct dm_bufio_client *c;
-
- mutex_lock(&dm_bufio_clients_lock);
-
- __cache_size_refresh();
-
- list_for_each_entry(c, &dm_bufio_all_clients, client_list)
- evict_old_buffers(c, max_age_hz);
-
- mutex_unlock(&dm_bufio_clients_lock);
-}
-
-static void work_fn(struct work_struct *w)
-{
- cleanup_old_buffers();
-
- queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
- DM_BUFIO_WORK_TIMER_SECS * HZ);
-}
-
-/*--------------------------------------------------------------*/
-
/*
* Global cleanup tries to evict the oldest buffers from across _all_
* the clients. It does this by repeatedly evicting a few buffers from
@@ -2841,27 +2704,51 @@ static void __insert_client(struct dm_bufio_client *new_client)
list_add_tail(&new_client->client_list, h);
}
+static enum evict_result select_for_evict(struct dm_buffer *b, void *context)
+{
+ /* In no-sleep mode, we cannot wait on IO. */
+ if (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep) {
+ if (test_bit_acquire(B_READING, &b->state) ||
+ test_bit(B_WRITING, &b->state) ||
+ test_bit(B_DIRTY, &b->state))
+ return ER_DONT_EVICT;
+ }
+ return ER_EVICT;
+}
+
static unsigned long __evict_a_few(unsigned long nr_buffers)
{
- unsigned long count;
struct dm_bufio_client *c;
- struct evict_params params = {
- .gfp = GFP_KERNEL,
- .age_hz = 0,
- /* set to jiffies in case there are no buffers in this client */
- .last_accessed = jiffies
- };
+ unsigned long oldest_buffer = jiffies;
+ unsigned long last_accessed;
+ unsigned long count;
+ struct dm_buffer *b;
c = __pop_client();
if (!c)
return 0;
dm_bufio_lock(c);
- count = __evict_many(c, &params, LIST_CLEAN, nr_buffers);
+
+ for (count = 0; count < nr_buffers; count++) {
+ b = cache_evict(&c->cache, LIST_CLEAN, select_for_evict, NULL);
+ if (!b)
+ break;
+
+ last_accessed = READ_ONCE(b->last_accessed);
+ if (time_after_eq(oldest_buffer, last_accessed))
+ oldest_buffer = last_accessed;
+
+ __make_buffer_clean(b);
+ __free_buffer_wake(b);
+
+ cond_resched();
+ }
+
dm_bufio_unlock(c);
if (count)
- c->oldest_buffer = params.last_accessed;
+ c->oldest_buffer = oldest_buffer;
__insert_client(c);
return count;
@@ -2944,10 +2831,7 @@ static int __init dm_bufio_init(void)
if (!dm_bufio_wq)
return -ENOMEM;
- INIT_DELAYED_WORK(&dm_bufio_cleanup_old_work, work_fn);
INIT_WORK(&dm_bufio_replacement_work, do_global_cleanup);
- queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
- DM_BUFIO_WORK_TIMER_SECS * HZ);
return 0;
}
@@ -2959,7 +2843,6 @@ static void __exit dm_bufio_exit(void)
{
int bug = 0;
- cancel_delayed_work_sync(&dm_bufio_cleanup_old_work);
destroy_workqueue(dm_bufio_wq);
if (dm_bufio_client_count) {
@@ -2996,7 +2879,7 @@ module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, 0644);
MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
module_param_named(max_age_seconds, dm_bufio_max_age, uint, 0644);
-MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
+MODULE_PARM_DESC(max_age_seconds, "No longer does anything");
module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, 0644);
MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 3637761f3585..c889332e533b 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -141,6 +141,7 @@ struct mapped_device {
#ifdef CONFIG_BLK_DEV_ZONED
unsigned int nr_zones;
void *zone_revalidate_map;
+ struct task_struct *revalidate_map_task;
#endif
#ifdef CONFIG_IMA
@@ -162,9 +163,6 @@ struct mapped_device {
#define DMF_POST_SUSPENDING 8
#define DMF_EMULATE_ZONE_APPEND 9
-void disable_discard(struct mapped_device *md);
-void disable_write_zeroes(struct mapped_device *md);
-
static inline sector_t dm_get_size(struct mapped_device *md)
{
return get_capacity(md->disk);
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index d4cf0ac2a7aa..16d3d454fb0a 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -14,11 +14,14 @@
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/kthread.h>
+#include <linux/delay.h>
#include <linux/device-mapper.h>
#define DM_MSG_PREFIX "delay"
+#define SLEEP_SHIFT 3
+
struct delay_class {
struct dm_dev *dev;
sector_t start;
@@ -34,6 +37,7 @@ struct delay_c {
struct work_struct flush_expired_bios;
struct list_head delayed_bios;
struct task_struct *worker;
+ unsigned int worker_sleep_us;
bool may_delay;
struct delay_class read;
@@ -136,6 +140,7 @@ static int flush_worker_fn(void *data)
schedule();
} else {
spin_unlock(&dc->delayed_bios_lock);
+ fsleep(dc->worker_sleep_us);
cond_resched();
}
}
@@ -212,7 +217,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct delay_c *dc;
int ret;
- unsigned int max_delay;
+ unsigned int max_delay, min_delay;
if (argc != 3 && argc != 6 && argc != 9) {
ti->error = "Requires exactly 3, 6 or 9 arguments";
@@ -235,7 +240,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ret = delay_class_ctr(ti, &dc->read, argv);
if (ret)
goto bad;
- max_delay = dc->read.delay;
+ min_delay = max_delay = dc->read.delay;
if (argc == 3) {
ret = delay_class_ctr(ti, &dc->write, argv);
@@ -251,6 +256,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (ret)
goto bad;
max_delay = max(max_delay, dc->write.delay);
+ min_delay = min_not_zero(min_delay, dc->write.delay);
if (argc == 6) {
ret = delay_class_ctr(ti, &dc->flush, argv + 3);
@@ -263,9 +269,14 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (ret)
goto bad;
max_delay = max(max_delay, dc->flush.delay);
+ min_delay = min_not_zero(min_delay, dc->flush.delay);
out:
if (max_delay < 50) {
+ if (min_delay >> SLEEP_SHIFT)
+ dc->worker_sleep_us = 1000;
+ else
+ dc->worker_sleep_us = (min_delay * 1000) >> SLEEP_SHIFT;
/*
* In case of small requested delays, use kthread instead of
* timers and workqueue to achieve better latency.
@@ -438,7 +449,7 @@ out:
static struct target_type delay_target = {
.name = "delay",
- .version = {1, 4, 0},
+ .version = {1, 5, 0},
.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM,
.module = THIS_MODULE,
.ctr = delay_ctr,
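
The new SLEEP_SHIFT logic makes the flush kthread sleep roughly one eighth of the smallest configured delay (converted to microseconds), capped at 1 ms once min_delay reaches 8 ms, so the worker no longer spins at full tilt for short delays. A worked sketch of the computation from the hunk above:

	#define SLEEP_SHIFT 3

	/* min_delay is in milliseconds, the result in microseconds */
	static unsigned int example_worker_sleep_us(unsigned int min_delay)
	{
		if (min_delay >> SLEEP_SHIFT)			/* >= 8 ms */
			return 1000;				/* cap at 1 ms */
		return (min_delay * 1000) >> SLEEP_SHIFT;	/* delay/8 in us */
	}
	/* e.g. min_delay = 4 -> 500 us; min_delay = 20 -> capped to 1000 us */
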
diff --git a/drivers/md/dm-dust.c b/drivers/md/dm-dust.c
index 1a33820c9f46..e75310232bbf 100644
--- a/drivers/md/dm-dust.c
+++ b/drivers/md/dm-dust.c
@@ -534,7 +534,9 @@ static void dust_status(struct dm_target *ti, status_type_t type,
}
}
-static int dust_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
+static int dust_prepare_ioctl(struct dm_target *ti, struct block_device **bdev,
+ unsigned int cmd, unsigned long arg,
+ bool *forward)
{
struct dust_device *dd = ti->private;
struct dm_dev *dev = dd->dev;
diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c
index b19b0142a690..6abb31ca9662 100644
--- a/drivers/md/dm-ebs-target.c
+++ b/drivers/md/dm-ebs-target.c
@@ -415,7 +415,8 @@ static void ebs_status(struct dm_target *ti, status_type_t type,
}
}
-static int ebs_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
+static int ebs_prepare_ioctl(struct dm_target *ti, struct block_device **bdev,
+ unsigned int cmd, unsigned long arg, bool *forward)
{
struct ebs_c *ec = ti->private;
struct dm_dev *dev = ec->dev;
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index b690905ab89f..c711db6f8f5c 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -47,14 +47,15 @@ enum feature_flag_bits {
};
struct per_bio_data {
- bool bio_submitted;
+ bool bio_can_corrupt;
+ struct bvec_iter saved_iter;
};
static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
struct dm_target *ti)
{
- int r;
- unsigned int argc;
+ int r = 0;
+ unsigned int argc = 0;
const char *arg_name;
static const struct dm_arg _args[] = {
@@ -65,14 +66,13 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
{0, PROBABILITY_BASE, "Invalid random corrupt argument"},
};
- /* No feature arguments supplied. */
- if (!as->argc)
- return 0;
-
- r = dm_read_arg_group(_args, as, &argc, &ti->error);
- if (r)
+ if (as->argc && (r = dm_read_arg_group(_args, as, &argc, &ti->error)))
return r;
+ /* No feature arguments supplied. */
+ if (!argc)
+ goto error_all_io;
+
while (argc) {
arg_name = dm_shift_arg(as);
argc--;
@@ -128,8 +128,11 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
* corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>
*/
if (!strcasecmp(arg_name, "corrupt_bio_byte")) {
- if (!argc) {
- ti->error = "Feature corrupt_bio_byte requires parameters";
+ if (fc->corrupt_bio_byte) {
+ ti->error = "Feature corrupt_bio_byte duplicated";
+ return -EINVAL;
+ } else if (argc < 4) {
+ ti->error = "Feature corrupt_bio_byte requires 4 parameters";
return -EINVAL;
}
@@ -176,7 +179,10 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
}
if (!strcasecmp(arg_name, "random_read_corrupt")) {
- if (!argc) {
+ if (fc->random_read_corrupt) {
+ ti->error = "Feature random_read_corrupt duplicated";
+ return -EINVAL;
+ } else if (!argc) {
ti->error = "Feature random_read_corrupt requires a parameter";
return -EINVAL;
}
@@ -189,7 +195,10 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
}
if (!strcasecmp(arg_name, "random_write_corrupt")) {
- if (!argc) {
+ if (fc->random_write_corrupt) {
+ ti->error = "Feature random_write_corrupt duplicated";
+ return -EINVAL;
+ } else if (!argc) {
ti->error = "Feature random_write_corrupt requires a parameter";
return -EINVAL;
}
@@ -205,18 +214,25 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
return -EINVAL;
}
- if (test_bit(DROP_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
- ti->error = "drop_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
+ if (test_bit(DROP_WRITES, &fc->flags) &&
+ (fc->corrupt_bio_rw == WRITE || fc->random_write_corrupt)) {
+ ti->error = "drop_writes is incompatible with random_write_corrupt or corrupt_bio_byte with the WRITE flag set";
return -EINVAL;
- } else if (test_bit(ERROR_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
- ti->error = "error_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
+ } else if (test_bit(ERROR_WRITES, &fc->flags) &&
+ (fc->corrupt_bio_rw == WRITE || fc->random_write_corrupt)) {
+ ti->error = "error_writes is incompatible with random_write_corrupt or corrupt_bio_byte with the WRITE flag set";
+ return -EINVAL;
+ } else if (test_bit(ERROR_READS, &fc->flags) &&
+ (fc->corrupt_bio_rw == READ || fc->random_read_corrupt)) {
+ ti->error = "error_reads is incompatible with random_read_corrupt or corrupt_bio_byte with the READ flag set";
return -EINVAL;
}
if (!fc->corrupt_bio_byte && !test_bit(ERROR_READS, &fc->flags) &&
!test_bit(DROP_WRITES, &fc->flags) && !test_bit(ERROR_WRITES, &fc->flags) &&
!fc->random_read_corrupt && !fc->random_write_corrupt) {
+error_all_io:
set_bit(ERROR_WRITES, &fc->flags);
set_bit(ERROR_READS, &fc->flags);
}
@@ -278,7 +294,7 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (r)
goto bad;
- r = dm_read_arg(_args, &as, &fc->down_interval, &ti->error);
+ r = dm_read_arg(_args + 1, &as, &fc->down_interval, &ti->error);
if (r)
goto bad;
@@ -339,7 +355,8 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
}
static void corrupt_bio_common(struct bio *bio, unsigned int corrupt_bio_byte,
- unsigned char corrupt_bio_value)
+ unsigned char corrupt_bio_value,
+ struct bvec_iter start)
{
struct bvec_iter iter;
struct bio_vec bvec;
@@ -348,7 +365,7 @@ static void corrupt_bio_common(struct bio *bio, unsigned int corrupt_bio_byte,
* Overwrite the Nth byte of the bio's data, on whichever page
* it falls.
*/
- bio_for_each_segment(bvec, bio, iter) {
+ __bio_for_each_segment(bvec, bio, iter, start) {
if (bio_iter_len(bio, iter) > corrupt_bio_byte) {
unsigned char *segment = bvec_kmap_local(&bvec);
segment[corrupt_bio_byte] = corrupt_bio_value;
@@ -357,36 +374,31 @@ static void corrupt_bio_common(struct bio *bio, unsigned int corrupt_bio_byte,
"(rw=%c bi_opf=%u bi_sector=%llu size=%u)\n",
bio, corrupt_bio_value, corrupt_bio_byte,
(bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,
- (unsigned long long)bio->bi_iter.bi_sector,
- bio->bi_iter.bi_size);
+ (unsigned long long)start.bi_sector,
+ start.bi_size);
break;
}
corrupt_bio_byte -= bio_iter_len(bio, iter);
}
}
-static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
+static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc,
+ struct bvec_iter start)
{
unsigned int corrupt_bio_byte = fc->corrupt_bio_byte - 1;
- if (!bio_has_data(bio))
- return;
-
- corrupt_bio_common(bio, corrupt_bio_byte, fc->corrupt_bio_value);
+ corrupt_bio_common(bio, corrupt_bio_byte, fc->corrupt_bio_value, start);
}
-static void corrupt_bio_random(struct bio *bio)
+static void corrupt_bio_random(struct bio *bio, struct bvec_iter start)
{
unsigned int corrupt_byte;
unsigned char corrupt_value;
- if (!bio_has_data(bio))
- return;
-
- corrupt_byte = get_random_u32() % bio->bi_iter.bi_size;
+ corrupt_byte = get_random_u32() % start.bi_size;
corrupt_value = get_random_u8();
- corrupt_bio_common(bio, corrupt_byte, corrupt_value);
+ corrupt_bio_common(bio, corrupt_byte, corrupt_value, start);
}
static void clone_free(struct bio *clone)
@@ -481,7 +493,7 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
unsigned int elapsed;
struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
- pb->bio_submitted = false;
+ pb->bio_can_corrupt = false;
if (op_is_zone_mgmt(bio_op(bio)))
goto map_bio;
@@ -490,14 +502,15 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
elapsed = (jiffies - fc->start_time) / HZ;
if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) {
bool corrupt_fixed, corrupt_random;
- /*
- * Flag this bio as submitted while down.
- */
- pb->bio_submitted = true;
+
+ if (bio_has_data(bio)) {
+ pb->bio_can_corrupt = true;
+ pb->saved_iter = bio->bi_iter;
+ }
/*
- * Error reads if neither corrupt_bio_byte or drop_writes or error_writes are set.
- * Otherwise, flakey_end_io() will decide if the reads should be modified.
+ * If ERROR_READS isn't set flakey_end_io() will decide if the
+ * reads should be modified.
*/
if (bio_data_dir(bio) == READ) {
if (test_bit(ERROR_READS, &fc->flags))
@@ -516,6 +529,8 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_SUBMITTED;
}
+ if (!pb->bio_can_corrupt)
+ goto map_bio;
/*
* Corrupt matching writes.
*/
@@ -535,9 +550,11 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
struct bio *clone = clone_bio(ti, fc, bio);
if (clone) {
if (corrupt_fixed)
- corrupt_bio_data(clone, fc);
+ corrupt_bio_data(clone, fc,
+ clone->bi_iter);
if (corrupt_random)
- corrupt_bio_random(clone);
+ corrupt_bio_random(clone,
+ clone->bi_iter);
submit_bio(clone);
return DM_MAPIO_SUBMITTED;
}
@@ -559,28 +576,21 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio,
if (op_is_zone_mgmt(bio_op(bio)))
return DM_ENDIO_DONE;
- if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
+ if (!*error && pb->bio_can_corrupt && (bio_data_dir(bio) == READ)) {
if (fc->corrupt_bio_byte) {
if ((fc->corrupt_bio_rw == READ) &&
all_corrupt_bio_flags_match(bio, fc)) {
/*
* Corrupt successful matching READs while in down state.
*/
- corrupt_bio_data(bio, fc);
+ corrupt_bio_data(bio, fc, pb->saved_iter);
}
}
if (fc->random_read_corrupt) {
u64 rnd = get_random_u64();
u32 rem = do_div(rnd, PROBABILITY_BASE);
if (rem < fc->random_read_corrupt)
- corrupt_bio_random(bio);
- }
- if (test_bit(ERROR_READS, &fc->flags)) {
- /*
- * Error read during the down_interval if drop_writes
- * and error_writes were not configured.
- */
- *error = BLK_STS_IOERR;
+ corrupt_bio_random(bio, pb->saved_iter);
}
}
@@ -638,7 +648,9 @@ static void flakey_status(struct dm_target *ti, status_type_t type,
}
}
-static int flakey_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
+static int flakey_prepare_ioctl(struct dm_target *ti, struct block_device **bdev,
+ unsigned int cmd, unsigned long arg,
+ bool *forward)
{
struct flakey_c *fc = ti->private;
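
The new saved_iter field exists because bio->bi_iter is consumed as the bio progresses: by the time flakey_end_io() runs, the iterator no longer covers the payload. A small sketch of the replay pattern, assuming only <linux/bio.h>:

#include <linux/bio.h>

static unsigned int count_payload(struct bio *bio, struct bvec_iter saved)
{
	struct bvec_iter iter;
	struct bio_vec bvec;
	unsigned int bytes = 0;

	/*
	 * bio_for_each_segment() starts from bio->bi_iter, which is
	 * already exhausted at completion time; __bio_for_each_segment()
	 * replays from the snapshot taken in flakey_map().
	 */
	__bio_for_each_segment(bvec, bio, iter, saved)
		bytes += bvec.bv_len;

	return bytes;
}
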
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index d42eac944eb5..4165fef4c170 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1885,6 +1885,7 @@ static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags)
{DM_DEV_SET_GEOMETRY_CMD, 0, dev_set_geometry},
{DM_DEV_ARM_POLL_CMD, IOCTL_FLAGS_NO_PARAMS, dev_arm_poll},
{DM_GET_TARGET_VERSION_CMD, 0, get_target_version},
+ {DM_MPATH_PROBE_PATHS_CMD, 0, NULL}, /* block device ioctl */
};
if (unlikely(cmd >= ARRAY_SIZE(_ioctls)))
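
Unlike the other entries, DM_MPATH_PROBE_PATHS_CMD carries a NULL ioctl_fn because it is issued against the multipath block device node itself rather than /dev/mapper/control. A hedged userspace sketch -- the DM_MPATH_PROBE_PATHS name is assumed to be exported by the updated <linux/dm-ioctl.h>, and /dev/mapper/mpatha is a placeholder device node:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dm-ioctl.h>

int main(void)
{
	int fd = open("/dev/mapper/mpatha", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* 0: at least one usable path; -1/ENOTCONN: no valid paths left */
	if (ioctl(fd, DM_MPATH_PROBE_PATHS) < 0)
		perror("DM_MPATH_PROBE_PATHS");
	close(fd);
	return 0;
}
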
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 66318aba4bdb..15538ec58f8e 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -119,7 +119,9 @@ static void linear_status(struct dm_target *ti, status_type_t type,
}
}
-static int linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
+static int linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev,
+ unsigned int cmd, unsigned long arg,
+ bool *forward)
{
struct linear_c *lc = ti->private;
struct dm_dev *dev = lc->dev;
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 8d7df8303d0a..d484e8e1d48a 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -818,7 +818,9 @@ static void log_writes_status(struct dm_target *ti, status_type_t type,
}
static int log_writes_prepare_ioctl(struct dm_target *ti,
- struct block_device **bdev)
+ struct block_device **bdev,
+ unsigned int cmd, unsigned long arg,
+ bool *forward)
{
struct log_writes_c *lc = ti->private;
struct dm_dev *dev = lc->dev;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 6c98f4ae5ea9..81fec2e1e0ef 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -79,6 +79,7 @@ struct multipath {
struct pgpath *current_pgpath;
struct priority_group *current_pg;
struct priority_group *next_pg; /* Switch to this PG if set */
+ struct priority_group *last_probed_pg;
atomic_t nr_valid_paths; /* Total number of usable paths */
unsigned int nr_priority_groups;
@@ -87,6 +88,7 @@ struct multipath {
const char *hw_handler_name;
char *hw_handler_params;
wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
+ wait_queue_head_t probe_wait; /* Wait for probing paths */
unsigned int pg_init_retries; /* Number of times to retry pg_init */
unsigned int pg_init_delay_msecs; /* Number of msecs before pg_init retry */
atomic_t pg_init_in_progress; /* Only one pg_init allowed at once */
@@ -100,6 +102,7 @@ struct multipath {
struct bio_list queued_bios;
struct timer_list nopath_timer; /* Timeout for queue_if_no_path */
+ bool is_suspending;
};
/*
@@ -132,6 +135,8 @@ static void queue_if_no_path_timeout_work(struct timer_list *t);
#define MPATHF_PG_INIT_DISABLED 4 /* pg_init is not currently allowed */
#define MPATHF_PG_INIT_REQUIRED 5 /* pg_init needs calling? */
#define MPATHF_PG_INIT_DELAY_RETRY 6 /* Delay pg_init retry? */
+#define MPATHF_DELAY_PG_SWITCH 7 /* Delay switching pg if it still has paths */
+#define MPATHF_NEED_PG_SWITCH 8 /* Need to switch pgs after the delay has ended */
static bool mpath_double_check_test_bit(int MPATHF_bit, struct multipath *m)
{
@@ -254,6 +259,7 @@ static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
atomic_set(&m->pg_init_count, 0);
m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
init_waitqueue_head(&m->pg_init_wait);
+ init_waitqueue_head(&m->probe_wait);
return 0;
}
@@ -413,13 +419,21 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
goto failed;
}
+ /* Don't change PG until it has no remaining paths */
+ pg = READ_ONCE(m->current_pg);
+ if (pg) {
+ pgpath = choose_path_in_pg(m, pg, nr_bytes);
+ if (!IS_ERR_OR_NULL(pgpath))
+ return pgpath;
+ }
+
/* Were we instructed to switch PG? */
if (READ_ONCE(m->next_pg)) {
spin_lock_irqsave(&m->lock, flags);
pg = m->next_pg;
if (!pg) {
spin_unlock_irqrestore(&m->lock, flags);
- goto check_current_pg;
+ goto check_all_pgs;
}
m->next_pg = NULL;
spin_unlock_irqrestore(&m->lock, flags);
@@ -427,16 +441,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
if (!IS_ERR_OR_NULL(pgpath))
return pgpath;
}
-
- /* Don't change PG until it has no remaining paths */
-check_current_pg:
- pg = READ_ONCE(m->current_pg);
- if (pg) {
- pgpath = choose_path_in_pg(m, pg, nr_bytes);
- if (!IS_ERR_OR_NULL(pgpath))
- return pgpath;
- }
-
+check_all_pgs:
/*
* Loop through priority groups until we find a valid path.
* First time we skip PGs marked 'bypassed'.
@@ -612,7 +617,6 @@ static void multipath_queue_bio(struct multipath *m, struct bio *bio)
static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
{
struct pgpath *pgpath;
- unsigned long flags;
/* Do we need to select a new pgpath? */
pgpath = READ_ONCE(m->current_pgpath);
@@ -620,12 +624,12 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
if (!pgpath) {
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
__multipath_queue_bio(m, bio);
pgpath = ERR_PTR(-EAGAIN);
}
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
} else if (mpath_double_check_test_bit(MPATHF_QUEUE_IO, m) ||
mpath_double_check_test_bit(MPATHF_PG_INIT_REQUIRED, m)) {
@@ -688,7 +692,6 @@ static void process_queued_io_list(struct multipath *m)
static void process_queued_bios(struct work_struct *work)
{
int r;
- unsigned long flags;
struct bio *bio;
struct bio_list bios;
struct blk_plug plug;
@@ -697,16 +700,16 @@ static void process_queued_bios(struct work_struct *work)
bio_list_init(&bios);
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
if (bio_list_empty(&m->queued_bios)) {
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
return;
}
bio_list_merge_init(&bios, &m->queued_bios);
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
blk_start_plug(&plug);
while ((bio = bio_list_pop(&bios))) {
@@ -1190,7 +1193,6 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc, char **argv)
struct dm_arg_set as;
unsigned int pg_count = 0;
unsigned int next_pg_num;
- unsigned long flags;
as.argc = argc;
as.argv = argv;
@@ -1255,9 +1257,9 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
enable_nopath_timeout(m);
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
@@ -1292,23 +1294,21 @@ static void multipath_wait_for_pg_init_completion(struct multipath *m)
static void flush_multipath_work(struct multipath *m)
{
if (m->hw_handler_name) {
- unsigned long flags;
-
if (!atomic_read(&m->pg_init_in_progress))
goto skip;
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
if (atomic_read(&m->pg_init_in_progress) &&
!test_and_set_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) {
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
flush_workqueue(kmpath_handlerd);
multipath_wait_for_pg_init_completion(m);
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
}
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
}
skip:
if (m->queue_mode == DM_TYPE_BIO_BASED)
@@ -1370,11 +1370,10 @@ out:
static int reinstate_path(struct pgpath *pgpath)
{
int r = 0, run_queue = 0;
- unsigned long flags;
struct multipath *m = pgpath->pg->m;
unsigned int nr_valid_paths;
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
if (pgpath->is_active)
goto out;
@@ -1404,7 +1403,7 @@ static int reinstate_path(struct pgpath *pgpath)
schedule_work(&m->trigger_event);
out:
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
if (run_queue) {
dm_table_run_md_queue_async(m->ti->table);
process_queued_io_list(m);
@@ -1439,15 +1438,19 @@ static int action_dev(struct multipath *m, dev_t dev, action_fn action)
* Temporarily try to avoid having to use the specified PG
*/
static void bypass_pg(struct multipath *m, struct priority_group *pg,
- bool bypassed)
+ bool bypassed, bool can_be_delayed)
{
unsigned long flags;
spin_lock_irqsave(&m->lock, flags);
pg->bypassed = bypassed;
- m->current_pgpath = NULL;
- m->current_pg = NULL;
+ if (can_be_delayed && test_bit(MPATHF_DELAY_PG_SWITCH, &m->flags))
+ set_bit(MPATHF_NEED_PG_SWITCH, &m->flags);
+ else {
+ m->current_pgpath = NULL;
+ m->current_pg = NULL;
+ }
spin_unlock_irqrestore(&m->lock, flags);
@@ -1461,7 +1464,6 @@ static int switch_pg_num(struct multipath *m, const char *pgstr)
{
struct priority_group *pg;
unsigned int pgnum;
- unsigned long flags;
char dummy;
if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
@@ -1470,17 +1472,21 @@ static int switch_pg_num(struct multipath *m, const char *pgstr)
return -EINVAL;
}
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
list_for_each_entry(pg, &m->priority_groups, list) {
pg->bypassed = false;
if (--pgnum)
continue;
- m->current_pgpath = NULL;
- m->current_pg = NULL;
+ if (test_bit(MPATHF_DELAY_PG_SWITCH, &m->flags))
+ set_bit(MPATHF_NEED_PG_SWITCH, &m->flags);
+ else {
+ m->current_pgpath = NULL;
+ m->current_pg = NULL;
+ }
m->next_pg = pg;
}
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
schedule_work(&m->trigger_event);
return 0;
@@ -1507,7 +1513,7 @@ static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
break;
}
- bypass_pg(m, pg, bypassed);
+ bypass_pg(m, pg, bypassed, true);
return 0;
}
@@ -1561,7 +1567,7 @@ static void pg_init_done(void *data, int errors)
* Probably doing something like FW upgrade on the
* controller so try the other pg.
*/
- bypass_pg(m, pg, true);
+ bypass_pg(m, pg, true, false);
break;
case SCSI_DH_RETRY:
/* Wait before retrying. */
@@ -1742,6 +1748,9 @@ static void multipath_presuspend(struct dm_target *ti)
{
struct multipath *m = ti->private;
+ spin_lock_irq(&m->lock);
+ m->is_suspending = true;
+ spin_unlock_irq(&m->lock);
/* FIXME: bio-based shouldn't need to always disable queue_if_no_path */
if (m->queue_mode == DM_TYPE_BIO_BASED || !dm_noflush_suspending(m->ti))
queue_if_no_path(m, false, true, __func__);
@@ -1762,9 +1771,9 @@ static void multipath_postsuspend(struct dm_target *ti)
static void multipath_resume(struct dm_target *ti)
{
struct multipath *m = ti->private;
- unsigned long flags;
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
+ m->is_suspending = false;
if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) {
set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
@@ -1775,7 +1784,7 @@ static void multipath_resume(struct dm_target *ti)
test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags));
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
}
/*
@@ -1798,14 +1807,13 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
unsigned int status_flags, char *result, unsigned int maxlen)
{
int sz = 0, pg_counter, pgpath_counter;
- unsigned long flags;
struct multipath *m = ti->private;
struct priority_group *pg;
struct pgpath *p;
unsigned int pg_num;
char state;
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
/* Features */
if (type == STATUSTYPE_INFO)
@@ -1845,10 +1853,10 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
DMEMIT("%u ", m->nr_priority_groups);
- if (m->next_pg)
- pg_num = m->next_pg->pg_num;
- else if (m->current_pg)
+ if (m->current_pg)
pg_num = m->current_pg->pg_num;
+ else if (m->next_pg)
+ pg_num = m->next_pg->pg_num;
else
pg_num = (m->nr_priority_groups ? 1 : 0);
@@ -1951,7 +1959,7 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
break;
}
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
}
static int multipath_message(struct dm_target *ti, unsigned int argc, char **argv,
@@ -1961,7 +1969,6 @@ static int multipath_message(struct dm_target *ti, unsigned int argc, char **arg
dev_t dev;
struct multipath *m = ti->private;
action_fn action;
- unsigned long flags;
mutex_lock(&m->work_mutex);
@@ -1973,9 +1980,9 @@ static int multipath_message(struct dm_target *ti, unsigned int argc, char **arg
if (argc == 1) {
if (!strcasecmp(argv[0], "queue_if_no_path")) {
r = queue_if_no_path(m, true, false, __func__);
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
enable_nopath_timeout(m);
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
goto out;
} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
r = queue_if_no_path(m, false, false, __func__);
@@ -2021,14 +2028,132 @@ out:
return r;
}
+/*
+ * Perform a minimal read from the given path to find out whether the
+ * path still works. If a path error occurs, fail it.
+ */
+static int probe_path(struct pgpath *pgpath)
+{
+ struct block_device *bdev = pgpath->path.dev->bdev;
+ unsigned int read_size = bdev_logical_block_size(bdev);
+ struct page *page;
+ struct bio *bio;
+ blk_status_t status;
+ int r = 0;
+
+ if (WARN_ON_ONCE(read_size > PAGE_SIZE))
+ return -EINVAL;
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ /* Perform a minimal read: Sector 0, length read_size */
+ bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
+ if (!bio) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ bio->bi_iter.bi_sector = 0;
+ __bio_add_page(bio, page, read_size, 0);
+ submit_bio_wait(bio);
+ status = bio->bi_status;
+ bio_put(bio);
+
+ if (status && blk_path_error(status))
+ fail_path(pgpath);
+
+out:
+ __free_page(page);
+ return r;
+}
+
+/*
+ * Probe all active paths in current_pg to find out whether they still work.
+ * Fail all paths that do not work.
+ *
+ * Return -ENOTCONN if no valid path is left (even outside of current_pg). We
+ * cannot probe paths in other pgs without switching current_pg, so if valid
+ * paths are only in different pgs, they may or may not work. Additionally
+ * we should not probe paths in a pathgroup that is in the process of
+ * initializing. Userspace can submit a request and we'll switch and wait
+ * for the pathgroup to be initialized. If the request fails, it may need to
+ * probe again.
+ */
+static int probe_active_paths(struct multipath *m)
+{
+ struct pgpath *pgpath;
+ struct priority_group *pg = NULL;
+ int r = 0;
+
+ spin_lock_irq(&m->lock);
+ if (test_bit(MPATHF_DELAY_PG_SWITCH, &m->flags)) {
+ wait_event_lock_irq(m->probe_wait,
+ !test_bit(MPATHF_DELAY_PG_SWITCH, &m->flags),
+ m->lock);
+ /*
+		 * If we waited because a probe was already in progress,
+		 * and it probed the current active pathgroup, don't
+		 * reprobe. Just report whether any valid paths remain.
+ */
+ if (m->current_pg == m->last_probed_pg)
+ goto skip_probe;
+ }
+ if (!m->current_pg || m->is_suspending ||
+ test_bit(MPATHF_QUEUE_IO, &m->flags))
+ goto skip_probe;
+ set_bit(MPATHF_DELAY_PG_SWITCH, &m->flags);
+ pg = m->last_probed_pg = m->current_pg;
+ spin_unlock_irq(&m->lock);
+
+ list_for_each_entry(pgpath, &pg->pgpaths, list) {
+ if (pg != READ_ONCE(m->current_pg) ||
+ READ_ONCE(m->is_suspending))
+ goto out;
+ if (!pgpath->is_active)
+ continue;
+
+ r = probe_path(pgpath);
+ if (r < 0)
+ goto out;
+ }
+
+out:
+ spin_lock_irq(&m->lock);
+ clear_bit(MPATHF_DELAY_PG_SWITCH, &m->flags);
+ if (test_and_clear_bit(MPATHF_NEED_PG_SWITCH, &m->flags)) {
+ m->current_pgpath = NULL;
+ m->current_pg = NULL;
+ }
+skip_probe:
+ if (r == 0 && !atomic_read(&m->nr_valid_paths))
+ r = -ENOTCONN;
+ spin_unlock_irq(&m->lock);
+ if (pg)
+ wake_up(&m->probe_wait);
+ return r;
+}
+
static int multipath_prepare_ioctl(struct dm_target *ti,
- struct block_device **bdev)
+ struct block_device **bdev,
+ unsigned int cmd, unsigned long arg,
+ bool *forward)
{
struct multipath *m = ti->private;
struct pgpath *pgpath;
- unsigned long flags;
int r;
+ if (_IOC_TYPE(cmd) == DM_IOCTL) {
+ *forward = false;
+ switch (cmd) {
+ case DM_MPATH_PROBE_PATHS:
+ return probe_active_paths(m);
+ default:
+ return -ENOTTY;
+ }
+ }
+
pgpath = READ_ONCE(m->current_pgpath);
if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
pgpath = choose_pgpath(m, 0);
@@ -2044,10 +2169,10 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
} else {
/* No path is available */
r = -EIO;
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
r = -ENOTCONN;
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
}
if (r == -ENOTCONN) {
@@ -2055,10 +2180,10 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
/* Path status changed, redo selection */
(void) choose_pgpath(m, 0);
}
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
(void) __pg_init_all_paths(m);
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
dm_table_run_md_queue_async(m->ti->table);
process_queued_io_list(m);
}
@@ -2180,7 +2305,7 @@ static int multipath_busy(struct dm_target *ti)
*/
static struct target_type multipath_target = {
.name = "multipath",
- .version = {1, 14, 0},
+ .version = {1, 15, 0},
.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE |
DM_TARGET_PASSES_INTEGRITY,
.module = THIS_MODULE,
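
The interplay of MPATHF_DELAY_PG_SWITCH and MPATHF_NEED_PG_SWITCH above reduces to a small defer-and-replay pattern. The sketch below restates it with locking elided (in the real code both helpers run under m->lock):

static void request_pg_switch(struct multipath *m)
{
	if (test_bit(MPATHF_DELAY_PG_SWITCH, &m->flags))
		/* a probe is running: record the switch for later */
		set_bit(MPATHF_NEED_PG_SWITCH, &m->flags);
	else {
		/* no probe in flight: drop the current PG immediately */
		m->current_pgpath = NULL;
		m->current_pg = NULL;
	}
}

static void probe_finished(struct multipath *m)
{
	clear_bit(MPATHF_DELAY_PG_SWITCH, &m->flags);
	/* replay any switch that arrived while the probe was running */
	if (test_and_clear_bit(MPATHF_NEED_PG_SWITCH, &m->flags)) {
		m->current_pgpath = NULL;
		m->current_pg = NULL;
	}
	wake_up(&m->probe_wait);
}
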
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 9e615b4f1f5e..785af4816584 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -133,10 +133,9 @@ static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
spin_lock_irqsave(&ms->lock, flags);
should_wake = !(bl->head);
bio_list_add(bl, bio);
- spin_unlock_irqrestore(&ms->lock, flags);
-
if (should_wake)
wakeup_mirrord(ms);
+ spin_unlock_irqrestore(&ms->lock, flags);
}
static void dispatch_bios(void *context, struct bio_list *bio_list)
@@ -646,9 +645,9 @@ static void write_callback(unsigned long error, void *context)
if (!ms->failures.head)
should_wake = 1;
bio_list_add(&ms->failures, bio);
- spin_unlock_irqrestore(&ms->lock, flags);
if (should_wake)
wakeup_mirrord(ms);
+ spin_unlock_irqrestore(&ms->lock, flags);
}
static void do_write(struct mirror_set *ms, struct bio *bio)
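
The dm-raid1 hunks move the wakeup inside ms->lock so the emptiness test, the list insertion and the wakeup form one atomic step with respect to the daemon; woken only after the unlock, the daemon could drain the list (and potentially tear down state) before the wakeup call runs. A generic sketch of the pattern, with hypothetical work_queue/queue_item names:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct work_queue {
	spinlock_t lock;
	struct list_head items;
	wait_queue_head_t wait;
};

static void queue_item(struct work_queue *q, struct list_head *item)
{
	unsigned long flags;
	bool was_empty;

	spin_lock_irqsave(&q->lock, flags);
	was_empty = list_empty(&q->items);
	list_add_tail(item, &q->items);
	if (was_empty)
		wake_up(&q->wait);	/* still under the lock */
	spin_unlock_irqrestore(&q->lock, flags);
}
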
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index e23076f7ece2..a6ca92049c10 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -217,10 +217,10 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
if (unlikely(error == BLK_STS_TARGET)) {
if (req_op(clone) == REQ_OP_DISCARD &&
!clone->q->limits.max_discard_sectors)
- disable_discard(tio->md);
+ blk_queue_disable_discard(tio->md->queue);
else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
!clone->q->limits.max_write_zeroes_sectors)
- disable_write_zeroes(tio->md);
+ blk_queue_disable_write_zeroes(tio->md->queue);
}
switch (r) {
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index a1b7535c508a..a7dc04bd55e5 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -405,7 +405,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
blk_status_t *error)
{
unsigned int i;
- char major_minor[16];
+ char major_minor[22];
struct stripe_c *sc = ti->private;
if (!*error)
@@ -417,8 +417,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
if (*error == BLK_STS_NOTSUPP)
return DM_ENDIO_DONE;
- memset(major_minor, 0, sizeof(major_minor));
- sprintf(major_minor, "%d:%d", MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)));
+ format_dev_t(major_minor, bio_dev(bio));
/*
* Test to see which stripe drive triggered the event
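
The 22-byte buffer is sized for the worst case of format_dev_t(), which prints the major and minor numbers as two u32 values: "4294967295:4294967295" is 10 + 1 + 10 characters plus the terminating NUL, i.e. 22 bytes, so the old 16-byte buffer could be overrun. Usage stays a one-liner:

	char major_minor[22];

	/* fills the buffer with "MAJOR:MINOR" and NUL-terminates it */
	format_dev_t(major_minor, bio_dev(bio));
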
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index dfd9fb52a6f3..bb1a70b5a215 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -517,7 +517,9 @@ static void switch_status(struct dm_target *ti, status_type_t type,
*
* Passthrough all ioctls to the path for sector 0
*/
-static int switch_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
+static int switch_prepare_ioctl(struct dm_target *ti, struct block_device **bdev,
+ unsigned int cmd, unsigned long arg,
+ bool *forward)
{
struct switch_ctx *sctx = ti->private;
unsigned int path_nr;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 6b23e777e10e..24a857ff6d0b 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -117,7 +117,6 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
n_targets = (struct dm_target *) (n_highs + num);
memset(n_highs, -1, sizeof(*n_highs) * num);
- kvfree(t->highs);
t->num_allocated = num;
t->highs = n_highs;
@@ -257,7 +256,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
if (bdev_is_zoned(bdev)) {
unsigned int zone_sectors = bdev_zone_sectors(bdev);
- if (start & (zone_sectors - 1)) {
+ if (!bdev_is_zone_aligned(bdev, start)) {
DMERR("%s: start=%llu not aligned to h/w zone size %u of %pg",
dm_device_name(ti->table->md),
(unsigned long long)start,
@@ -274,7 +273,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
* devices do not end up with a smaller zone in the middle of
* the sector range.
*/
- if (len & (zone_sectors - 1)) {
+ if (!bdev_is_zone_aligned(bdev, len)) {
DMERR("%s: len=%llu not aligned to h/w zone size %u of %pg",
dm_device_name(ti->table->md),
(unsigned long long)len,
@@ -431,6 +430,13 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
return 0;
}
+ mutex_lock(&q->limits_lock);
+ /*
+ * BLK_FEAT_ATOMIC_WRITES is not inherited from the bottom device in
+ * blk_stack_limits(), so do it manually.
+ */
+ limits->features |= (q->limits.features & BLK_FEAT_ATOMIC_WRITES);
+
if (blk_stack_limits(limits, &q->limits,
get_start_sect(bdev) + start) < 0)
DMWARN("%s: adding target device %pg caused an alignment inconsistency: "
@@ -448,6 +454,7 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
*/
if (!dm_target_has_integrity(ti->type))
queue_limits_stack_integrity_bdev(limits, bdev);
+ mutex_unlock(&q->limits_lock);
return 0;
}
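
Because blk_stack_limits() deliberately does not propagate BLK_FEAT_ATOMIC_WRITES, the hunk above copies the bit by hand before stacking, under the bottom queue's limits_lock. Distilled into a standalone helper (a sketch, assuming only <linux/blkdev.h>):

#include <linux/blkdev.h>

static int stack_limits_with_atomic_writes(struct queue_limits *top,
					   struct request_queue *bottom,
					   sector_t start)
{
	int ret;

	mutex_lock(&bottom->limits_lock);
	/* the one feature bit blk_stack_limits() will not inherit */
	top->features |= (bottom->limits.features & BLK_FEAT_ATOMIC_WRITES);
	ret = blk_stack_limits(top, &bottom->limits, start);
	mutex_unlock(&bottom->limits_lock);

	return ret;
}
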
@@ -1189,6 +1196,176 @@ put_live_table:
return 0;
}
+enum dm_wrappedkey_op {
+ DERIVE_SW_SECRET,
+ IMPORT_KEY,
+ GENERATE_KEY,
+ PREPARE_KEY,
+};
+
+struct dm_wrappedkey_op_args {
+ enum dm_wrappedkey_op op;
+ int err;
+ union {
+ struct {
+ const u8 *eph_key;
+ size_t eph_key_size;
+ u8 *sw_secret;
+ } derive_sw_secret;
+ struct {
+ const u8 *raw_key;
+ size_t raw_key_size;
+ u8 *lt_key;
+ } import_key;
+ struct {
+ u8 *lt_key;
+ } generate_key;
+ struct {
+ const u8 *lt_key;
+ size_t lt_key_size;
+ u8 *eph_key;
+ } prepare_key;
+ };
+};
+
+static int dm_wrappedkey_op_callback(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ struct dm_wrappedkey_op_args *args = data;
+ struct block_device *bdev = dev->bdev;
+ struct blk_crypto_profile *profile =
+ bdev_get_queue(bdev)->crypto_profile;
+ int err = -EOPNOTSUPP;
+
+ if (!args->err)
+ return 0;
+
+ switch (args->op) {
+ case DERIVE_SW_SECRET:
+ err = blk_crypto_derive_sw_secret(
+ bdev,
+ args->derive_sw_secret.eph_key,
+ args->derive_sw_secret.eph_key_size,
+ args->derive_sw_secret.sw_secret);
+ break;
+ case IMPORT_KEY:
+ err = blk_crypto_import_key(profile,
+ args->import_key.raw_key,
+ args->import_key.raw_key_size,
+ args->import_key.lt_key);
+ break;
+ case GENERATE_KEY:
+ err = blk_crypto_generate_key(profile,
+ args->generate_key.lt_key);
+ break;
+ case PREPARE_KEY:
+ err = blk_crypto_prepare_key(profile,
+ args->prepare_key.lt_key,
+ args->prepare_key.lt_key_size,
+ args->prepare_key.eph_key);
+ break;
+ }
+ args->err = err;
+
+ /* Try another device in case this fails. */
+ return 0;
+}
+
+static int dm_exec_wrappedkey_op(struct blk_crypto_profile *profile,
+ struct dm_wrappedkey_op_args *args)
+{
+ struct mapped_device *md =
+ container_of(profile, struct dm_crypto_profile, profile)->md;
+ struct dm_target *ti;
+ struct dm_table *t;
+ int srcu_idx;
+ int i;
+
+ args->err = -EOPNOTSUPP;
+
+ t = dm_get_live_table(md, &srcu_idx);
+ if (!t)
+ goto out;
+
+ /*
+ * blk-crypto currently has no support for multiple incompatible
+ * implementations of wrapped inline crypto keys on a single system.
+ * It was already checked earlier that support for wrapped keys was
+ * declared on all underlying devices. Thus, all the underlying devices
+ * should support all wrapped key operations and they should behave
+ * identically, i.e. work with the same keys. So, just executing the
+ * operation on the first device on which it works suffices for now.
+ */
+ for (i = 0; i < t->num_targets; i++) {
+ ti = dm_table_get_target(t, i);
+ if (!ti->type->iterate_devices)
+ continue;
+ ti->type->iterate_devices(ti, dm_wrappedkey_op_callback, args);
+ if (!args->err)
+ break;
+ }
+out:
+ dm_put_live_table(md, srcu_idx);
+ return args->err;
+}
+
+static int dm_derive_sw_secret(struct blk_crypto_profile *profile,
+ const u8 *eph_key, size_t eph_key_size,
+ u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE])
+{
+ struct dm_wrappedkey_op_args args = {
+ .op = DERIVE_SW_SECRET,
+ .derive_sw_secret = {
+ .eph_key = eph_key,
+ .eph_key_size = eph_key_size,
+ .sw_secret = sw_secret,
+ },
+ };
+ return dm_exec_wrappedkey_op(profile, &args);
+}
+
+static int dm_import_key(struct blk_crypto_profile *profile,
+ const u8 *raw_key, size_t raw_key_size,
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+ struct dm_wrappedkey_op_args args = {
+ .op = IMPORT_KEY,
+ .import_key = {
+ .raw_key = raw_key,
+ .raw_key_size = raw_key_size,
+ .lt_key = lt_key,
+ },
+ };
+ return dm_exec_wrappedkey_op(profile, &args);
+}
+
+static int dm_generate_key(struct blk_crypto_profile *profile,
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+ struct dm_wrappedkey_op_args args = {
+ .op = GENERATE_KEY,
+ .generate_key = {
+ .lt_key = lt_key,
+ },
+ };
+ return dm_exec_wrappedkey_op(profile, &args);
+}
+
+static int dm_prepare_key(struct blk_crypto_profile *profile,
+ const u8 *lt_key, size_t lt_key_size,
+ u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+ struct dm_wrappedkey_op_args args = {
+ .op = PREPARE_KEY,
+ .prepare_key = {
+ .lt_key = lt_key,
+ .lt_key_size = lt_key_size,
+ .eph_key = eph_key,
+ },
+ };
+ return dm_exec_wrappedkey_op(profile, &args);
+}
+
static int
device_intersect_crypto_capabilities(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
@@ -1263,6 +1440,13 @@ static int dm_table_construct_crypto_profile(struct dm_table *t)
profile);
}
+ if (profile->key_types_supported & BLK_CRYPTO_KEY_TYPE_HW_WRAPPED) {
+ profile->ll_ops.derive_sw_secret = dm_derive_sw_secret;
+ profile->ll_ops.import_key = dm_import_key;
+ profile->ll_ops.generate_key = dm_generate_key;
+ profile->ll_ops.prepare_key = dm_prepare_key;
+ }
+
if (t->md->queue &&
!blk_crypto_has_capabilities(profile,
t->md->queue->crypto_profile)) {
@@ -1490,6 +1674,18 @@ bool dm_table_has_no_data_devices(struct dm_table *t)
return true;
}
+bool dm_table_is_wildcard(struct dm_table *t)
+{
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
+
+ if (!dm_target_is_wildcard(ti->type))
+ return false;
+ }
+
+ return true;
+}
+
static int device_not_zoned(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
@@ -1721,8 +1917,12 @@ static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *
sector_t start, sector_t len, void *data)
{
struct request_queue *q = bdev_get_queue(dev->bdev);
+ int b;
- return !q->limits.max_write_zeroes_sectors;
+ mutex_lock(&q->limits_lock);
+ b = !q->limits.max_write_zeroes_sectors;
+ mutex_unlock(&q->limits_lock);
+ return b;
}
static bool dm_table_supports_write_zeroes(struct dm_table *t)
@@ -1830,10 +2030,24 @@ static bool dm_table_supports_atomic_writes(struct dm_table *t)
return true;
}
+bool dm_table_supports_size_change(struct dm_table *t, sector_t old_size,
+ sector_t new_size)
+{
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && dm_has_zone_plugs(t->md) &&
+ old_size != new_size) {
+ DMWARN("%s: device has zone write plug resources. "
+ "Cannot change size",
+ dm_device_name(t->md));
+ return false;
+ }
+ return true;
+}
+
int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{
int r;
+ struct queue_limits old_limits;
if (!dm_table_supports_nowait(t))
limits->features &= ~BLK_FEAT_NOWAIT;
@@ -1860,28 +2074,30 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
if (dm_table_supports_flush(t))
limits->features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA;
- if (dm_table_supports_dax(t, device_not_dax_capable)) {
+ if (dm_table_supports_dax(t, device_not_dax_capable))
limits->features |= BLK_FEAT_DAX;
- if (dm_table_supports_dax(t, device_not_dax_synchronous_capable))
- set_dax_synchronous(t->md->dax_dev);
- } else
+ else
limits->features &= ~BLK_FEAT_DAX;
- if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
- dax_write_cache(t->md->dax_dev, true);
-
/* For a zoned table, setup the zone related queue attributes. */
- if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
- (limits->features & BLK_FEAT_ZONED)) {
- r = dm_set_zones_restrictions(t, q, limits);
- if (r)
- return r;
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
+ if (limits->features & BLK_FEAT_ZONED) {
+ r = dm_set_zones_restrictions(t, q, limits);
+ if (r)
+ return r;
+ } else if (dm_has_zone_plugs(t->md)) {
+ DMWARN("%s: device has zone write plug resources. "
+ "Cannot switch to non-zoned table.",
+ dm_device_name(t->md));
+ return -EINVAL;
+ }
}
if (dm_table_supports_atomic_writes(t))
limits->features |= BLK_FEAT_ATOMIC_WRITES;
- r = queue_limits_set(q, limits);
+ old_limits = queue_limits_start_update(q);
+ r = queue_limits_commit_update(q, limits);
if (r)
return r;
@@ -1892,10 +2108,21 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
(limits->features & BLK_FEAT_ZONED)) {
r = dm_revalidate_zones(t, q);
- if (r)
+ if (r) {
+ queue_limits_set(q, &old_limits);
return r;
+ }
}
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
+ dm_finalize_zone_settings(t, limits);
+
+ if (dm_table_supports_dax(t, device_not_dax_synchronous_capable))
+ set_dax_synchronous(t->md->dax_dev);
+
+ if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
+ dax_write_cache(t->md->dax_dev, true);
+
dm_update_crypto_profile(q, t);
return 0;
}
diff --git a/drivers/md/dm-vdo/indexer/volume.c b/drivers/md/dm-vdo/indexer/volume.c
index 655453bb276b..425b3a74f4db 100644
--- a/drivers/md/dm-vdo/indexer/volume.c
+++ b/drivers/md/dm-vdo/indexer/volume.c
@@ -754,10 +754,11 @@ static int get_volume_page_protected(struct volume *volume, struct uds_request *
u32 physical_page, struct cached_page **page_ptr)
{
struct cached_page *page;
+ unsigned int zone_number = request->zone_number;
get_page_from_cache(&volume->page_cache, physical_page, &page);
if (page != NULL) {
- if (request->zone_number == 0) {
+ if (zone_number == 0) {
/* Only one zone is allowed to update the LRU. */
make_page_most_recent(&volume->page_cache, page);
}
@@ -767,7 +768,7 @@ static int get_volume_page_protected(struct volume *volume, struct uds_request *
}
/* Prepare to enqueue a read for the page. */
- end_pending_search(&volume->page_cache, request->zone_number);
+ end_pending_search(&volume->page_cache, zone_number);
mutex_lock(&volume->read_threads_mutex);
/*
@@ -787,8 +788,7 @@ static int get_volume_page_protected(struct volume *volume, struct uds_request *
* the order does not matter for correctness as it does below.
*/
mutex_unlock(&volume->read_threads_mutex);
- begin_pending_search(&volume->page_cache, physical_page,
- request->zone_number);
+ begin_pending_search(&volume->page_cache, physical_page, zone_number);
return UDS_QUEUED;
}
@@ -797,7 +797,7 @@ static int get_volume_page_protected(struct volume *volume, struct uds_request *
* "search pending" state in careful order so no other thread can mess with the data before
* the caller gets to look at it.
*/
- begin_pending_search(&volume->page_cache, physical_page, request->zone_number);
+ begin_pending_search(&volume->page_cache, physical_page, zone_number);
mutex_unlock(&volume->read_threads_mutex);
*page_ptr = page;
return UDS_SUCCESS;
@@ -849,6 +849,7 @@ static int search_cached_index_page(struct volume *volume, struct uds_request *r
{
int result;
struct cached_page *page = NULL;
+ unsigned int zone_number = request->zone_number;
u32 physical_page = map_to_physical_page(volume->geometry, chapter,
index_page_number);
@@ -858,18 +859,18 @@ static int search_cached_index_page(struct volume *volume, struct uds_request *r
* invalidation by the reader thread, before the reader thread has noticed that the
* invalidate_counter has been incremented.
*/
- begin_pending_search(&volume->page_cache, physical_page, request->zone_number);
+ begin_pending_search(&volume->page_cache, physical_page, zone_number);
result = get_volume_page_protected(volume, request, physical_page, &page);
if (result != UDS_SUCCESS) {
- end_pending_search(&volume->page_cache, request->zone_number);
+ end_pending_search(&volume->page_cache, zone_number);
return result;
}
result = uds_search_chapter_index_page(&page->index_page, volume->geometry,
&request->record_name,
record_page_number);
- end_pending_search(&volume->page_cache, request->zone_number);
+ end_pending_search(&volume->page_cache, zone_number);
return result;
}
@@ -882,6 +883,7 @@ int uds_search_cached_record_page(struct volume *volume, struct uds_request *req
{
struct cached_page *record_page;
struct index_geometry *geometry = volume->geometry;
+ unsigned int zone_number = request->zone_number;
int result;
u32 physical_page, page_number;
@@ -905,11 +907,11 @@ int uds_search_cached_record_page(struct volume *volume, struct uds_request *req
* invalidation by the reader thread, before the reader thread has noticed that the
* invalidate_counter has been incremented.
*/
- begin_pending_search(&volume->page_cache, physical_page, request->zone_number);
+ begin_pending_search(&volume->page_cache, physical_page, zone_number);
result = get_volume_page_protected(volume, request, physical_page, &record_page);
if (result != UDS_SUCCESS) {
- end_pending_search(&volume->page_cache, request->zone_number);
+ end_pending_search(&volume->page_cache, zone_number);
return result;
}
@@ -917,7 +919,7 @@ int uds_search_cached_record_page(struct volume *volume, struct uds_request *req
&request->record_name, geometry, &request->old_metadata))
*found = true;
- end_pending_search(&volume->page_cache, request->zone_number);
+ end_pending_search(&volume->page_cache, zone_number);
return UDS_SUCCESS;
}
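
All of the dm-vdo hunks apply one rule: copy request->zone_number into a local before any call that can hand the request to another thread, because after the handoff the request may be requeued or freed. A minimal sketch with hypothetical types and helpers:

struct queued_request {
	unsigned int zone_number;
	/* ... */
};

/* hands ownership of r to a reader thread */
void enqueue_page_read(struct queued_request *r);
void end_pending_search_in_zone(unsigned int zone);

static void request_page_read(struct queued_request *r)
{
	unsigned int zone = r->zone_number;	/* snapshot before handoff */

	enqueue_page_read(r);	/* r must not be touched after this */
	end_pending_search_in_zone(zone);	/* safe: uses the snapshot */
}
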
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 0c41949db784..631a887b487c 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -593,6 +593,10 @@ int verity_fec_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
(*argc)--;
if (!strcasecmp(arg_name, DM_VERITY_OPT_FEC_DEV)) {
+ if (v->fec->dev) {
+ ti->error = "FEC device already specified";
+ return -EINVAL;
+ }
r = dm_get_device(ti, arg_value, BLK_OPEN_READ, &v->fec->dev);
if (r) {
ti->error = "FEC device lookup failed";
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 3c427f18a04b..81186bded1ce 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -682,7 +682,8 @@ static void verity_bh_work(struct work_struct *w)
static inline bool verity_use_bh(unsigned int bytes, unsigned short ioprio)
{
return ioprio <= IOPRIO_CLASS_IDLE &&
- bytes <= READ_ONCE(dm_verity_use_bh_bytes[ioprio]);
+ bytes <= READ_ONCE(dm_verity_use_bh_bytes[ioprio]) &&
+ !need_resched();
}
static void verity_end_io(struct bio *bio)
@@ -993,7 +994,9 @@ static void verity_status(struct dm_target *ti, status_type_t type,
}
}
-static int verity_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
+static int verity_prepare_ioctl(struct dm_target *ti, struct block_device **bdev,
+ unsigned int cmd, unsigned long arg,
+ bool *forward)
{
struct dm_verity *v = ti->private;
@@ -1120,6 +1123,9 @@ static int verity_alloc_most_once(struct dm_verity *v)
{
struct dm_target *ti = v->ti;
+ if (v->validated_blocks)
+ return 0;
+
/* the bitset can only handle INT_MAX blocks */
if (v->data_blocks > INT_MAX) {
ti->error = "device too large to use check_at_most_once";
@@ -1143,6 +1149,9 @@ static int verity_alloc_zero_digest(struct dm_verity *v)
struct dm_verity_io *io;
u8 *zero_data;
+ if (v->zero_digest)
+ return 0;
+
v->zero_digest = kmalloc(v->digest_size, GFP_KERNEL);
if (!v->zero_digest)
@@ -1577,7 +1586,7 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- /* Root hash signature is a optional parameter*/
+ /* Root hash signature is an optional parameter */
r = verity_verify_root_hash(root_hash_digest_to_validate,
strlen(root_hash_digest_to_validate),
verify_args.sig,
diff --git a/drivers/md/dm-verity-verify-sig.c b/drivers/md/dm-verity-verify-sig.c
index a9e2c6c0a33c..d5261a0e4232 100644
--- a/drivers/md/dm-verity-verify-sig.c
+++ b/drivers/md/dm-verity-verify-sig.c
@@ -71,9 +71,14 @@ int verity_verify_sig_parse_opt_args(struct dm_arg_set *as,
const char *arg_name)
{
struct dm_target *ti = v->ti;
- int ret = 0;
+ int ret;
const char *sig_key = NULL;
+ if (v->signature_key_desc) {
+ ti->error = DM_VERITY_VERIFY_ERR("root_hash_sig_key_desc already specified");
+ return -EINVAL;
+ }
+
if (!*argc) {
ti->error = DM_VERITY_VERIFY_ERR("Signature key not specified");
return -EINVAL;
@@ -83,14 +88,18 @@ int verity_verify_sig_parse_opt_args(struct dm_arg_set *as,
(*argc)--;
ret = verity_verify_get_sig_from_key(sig_key, sig_opts);
- if (ret < 0)
+ if (ret < 0) {
ti->error = DM_VERITY_VERIFY_ERR("Invalid key specified");
+ return ret;
+ }
v->signature_key_desc = kstrdup(sig_key, GFP_KERNEL);
- if (!v->signature_key_desc)
+ if (!v->signature_key_desc) {
+ ti->error = DM_VERITY_VERIFY_ERR("Could not allocate memory for signature key");
return -ENOMEM;
+ }
- return ret;
+ return 0;
}
/*
diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c
index 20edd3fabbab..3d31b82e0730 100644
--- a/drivers/md/dm-zone.c
+++ b/drivers/md/dm-zone.c
@@ -56,24 +56,31 @@ int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
{
struct mapped_device *md = disk->private_data;
struct dm_table *map;
- int srcu_idx, ret;
+ struct dm_table *zone_revalidate_map = md->zone_revalidate_map;
+ int srcu_idx, ret = -EIO;
+ bool put_table = false;
- if (!md->zone_revalidate_map) {
- /* Regular user context */
+ if (!zone_revalidate_map || md->revalidate_map_task != current) {
+ /*
+		 * Regular user context, or zone revalidation during
+		 * __bind() is in progress but this call came from a
+		 * different process.
+ */
if (dm_suspended_md(md))
return -EAGAIN;
map = dm_get_live_table(md, &srcu_idx);
- if (!map)
- return -EIO;
+ put_table = true;
} else {
/* Zone revalidation during __bind() */
- map = md->zone_revalidate_map;
+ map = zone_revalidate_map;
}
- ret = dm_blk_do_report_zones(md, map, sector, nr_zones, cb, data);
+ if (map)
+ ret = dm_blk_do_report_zones(md, map, sector, nr_zones, cb,
+ data);
- if (!md->zone_revalidate_map)
+ if (put_table)
dm_put_live_table(md, srcu_idx);
return ret;
@@ -153,33 +160,36 @@ int dm_revalidate_zones(struct dm_table *t, struct request_queue *q)
{
struct mapped_device *md = t->md;
struct gendisk *disk = md->disk;
+ unsigned int nr_zones = disk->nr_zones;
int ret;
if (!get_capacity(disk))
return 0;
- /* Revalidate only if something changed. */
- if (!disk->nr_zones || disk->nr_zones != md->nr_zones) {
- DMINFO("%s using %s zone append",
- disk->disk_name,
- queue_emulates_zone_append(q) ? "emulated" : "native");
- md->nr_zones = 0;
- }
-
- if (md->nr_zones)
+ /*
+ * Do not revalidate if zone write plug resources have already
+ * been allocated.
+ */
+ if (dm_has_zone_plugs(md))
return 0;
+ DMINFO("%s using %s zone append", disk->disk_name,
+ queue_emulates_zone_append(q) ? "emulated" : "native");
+
/*
* Our table is not live yet. So the call to dm_get_live_table()
* in dm_blk_report_zones() will fail. Set a temporary pointer to
* our table for dm_blk_report_zones() to use directly.
*/
md->zone_revalidate_map = t;
+ md->revalidate_map_task = current;
ret = blk_revalidate_disk_zones(disk);
+ md->revalidate_map_task = NULL;
md->zone_revalidate_map = NULL;
if (ret) {
DMERR("Revalidate zones failed %d", ret);
+ disk->nr_zones = nr_zones;
return ret;
}
@@ -337,15 +347,15 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
/*
* Check if zone append is natively supported, and if not, set the
- * mapped device queue as needing zone append emulation.
+ * mapped device queue as needing zone append emulation. If zone
+ * append is natively supported, make sure that
+ * max_hw_zone_append_sectors is not set to 0.
*/
WARN_ON_ONCE(queue_is_mq(q));
- if (dm_table_supports_zone_append(t)) {
- clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
- } else {
- set_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
+ if (!dm_table_supports_zone_append(t))
lim->max_hw_zone_append_sectors = 0;
- }
+ else if (lim->max_hw_zone_append_sectors == 0)
+ lim->max_hw_zone_append_sectors = lim->max_zone_append_sectors;
/*
* Determine the max open and max active zone limits for the mapped
@@ -380,15 +390,28 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
lim->max_open_zones = 0;
lim->max_active_zones = 0;
lim->max_hw_zone_append_sectors = 0;
+ lim->max_zone_append_sectors = 0;
lim->zone_write_granularity = 0;
lim->chunk_sectors = 0;
lim->features &= ~BLK_FEAT_ZONED;
- clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
- md->nr_zones = 0;
- disk->nr_zones = 0;
return 0;
}
+ if (get_capacity(disk) && dm_has_zone_plugs(t->md)) {
+ if (q->limits.chunk_sectors != lim->chunk_sectors) {
+ DMWARN("%s: device has zone write plug resources. "
+ "Cannot change zone size",
+ disk->disk_name);
+ return -EINVAL;
+ }
+ if (lim->max_hw_zone_append_sectors != 0 &&
+ !dm_table_is_wildcard(t)) {
+ DMWARN("%s: device has zone write plug resources. "
+ "New table must emulate zone append",
+ disk->disk_name);
+ return -EINVAL;
+ }
+ }
/*
* Warn once (when the capacity is not yet set) if the mapped device is
* partially using zone resources of the target devices as that leads to
@@ -408,6 +431,23 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
return 0;
}
+void dm_finalize_zone_settings(struct dm_table *t, struct queue_limits *lim)
+{
+ struct mapped_device *md = t->md;
+
+ if (lim->features & BLK_FEAT_ZONED) {
+ if (dm_table_supports_zone_append(t))
+ clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
+ else
+ set_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
+ } else {
+ clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
+ md->nr_zones = 0;
+ md->disk->nr_zones = 0;
+ }
+}
+
/*
* IO completion callback called from clone_endio().
*/
@@ -423,9 +463,9 @@ void dm_zone_endio(struct dm_io *io, struct bio *clone)
*/
if (clone->bi_status == BLK_STS_OK &&
bio_op(clone) == REQ_OP_ZONE_APPEND) {
- sector_t mask = bdev_zone_sectors(disk->part0) - 1;
-
- orig_bio->bi_iter.bi_sector += clone->bi_iter.bi_sector & mask;
+ orig_bio->bi_iter.bi_sector +=
+ bdev_offset_from_zone_start(disk->part0,
+ clone->bi_iter.bi_sector);
}
return;
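
The dm_zone_endio() change swaps the open-coded power-of-2 mask for the bdev_offset_from_zone_start() helper; both compute the sector offset within the zone that the append landed in. A sketch of the equivalence, assuming <linux/blkdev.h>:

#include <linux/blkdev.h>

static sector_t append_offset_in_zone(struct block_device *bdev,
				      sector_t mapped_sector)
{
	/*
	 * For power-of-2 zone sizes this equals
	 * mapped_sector & (bdev_zone_sectors(bdev) - 1), but the
	 * helper keeps the intent explicit.
	 */
	return bdev_offset_from_zone_start(bdev, mapped_sector);
}
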
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 6141fc25d842..5da3db06da10 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -1015,7 +1015,8 @@ static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits)
/*
* Pass on ioctl to the backend device.
*/
-static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
+static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev,
+ unsigned int cmd, unsigned long arg, bool *forward)
{
struct dmz_target *dmz = ti->private;
struct dmz_dev *dev = &dmz->dev[0];
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 5ab7574c0c76..1726f0f828cc 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -411,7 +411,8 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
}
static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
- struct block_device **bdev)
+ struct block_device **bdev, unsigned int cmd,
+ unsigned long arg, bool *forward)
{
struct dm_target *ti;
struct dm_table *map;
@@ -434,8 +435,8 @@ retry:
if (dm_suspended_md(md))
return -EAGAIN;
- r = ti->type->prepare_ioctl(ti, bdev);
- if (r == -ENOTCONN && !fatal_signal_pending(current)) {
+ r = ti->type->prepare_ioctl(ti, bdev, cmd, arg, forward);
+ if (r == -ENOTCONN && *forward && !fatal_signal_pending(current)) {
dm_put_live_table(md, *srcu_idx);
fsleep(10000);
goto retry;
@@ -454,9 +455,10 @@ static int dm_blk_ioctl(struct block_device *bdev, blk_mode_t mode,
{
struct mapped_device *md = bdev->bd_disk->private_data;
int r, srcu_idx;
+ bool forward = true;
- r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
- if (r < 0)
+ r = dm_prepare_ioctl(md, &srcu_idx, &bdev, cmd, arg, &forward);
+ if (!forward || r < 0)
goto out;
if (r > 0) {
@@ -1082,22 +1084,6 @@ static inline struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
return &md->queue->limits;
}
-void disable_discard(struct mapped_device *md)
-{
- struct queue_limits *limits = dm_get_queue_limits(md);
-
- /* device doesn't really support DISCARD, disable it */
- limits->max_hw_discard_sectors = 0;
-}
-
-void disable_write_zeroes(struct mapped_device *md)
-{
- struct queue_limits *limits = dm_get_queue_limits(md);
-
- /* device doesn't really support WRITE ZEROES, disable it */
- limits->max_write_zeroes_sectors = 0;
-}
-
static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
{
return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
@@ -1115,10 +1101,10 @@ static void clone_endio(struct bio *bio)
if (unlikely(error == BLK_STS_TARGET)) {
if (bio_op(bio) == REQ_OP_DISCARD &&
!bdev_max_discard_sectors(bio->bi_bdev))
- disable_discard(md);
+ blk_queue_disable_discard(md->queue);
else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
!bdev_write_zeroes_sectors(bio->bi_bdev))
- disable_write_zeroes(md);
+ blk_queue_disable_write_zeroes(md->queue);
}
if (static_branch_unlikely(&zoned_enabled) &&
@@ -2421,21 +2407,35 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
struct queue_limits *limits)
{
struct dm_table *old_map;
- sector_t size;
+ sector_t size, old_size;
int ret;
lockdep_assert_held(&md->suspend_lock);
size = dm_table_get_size(t);
+ old_size = dm_get_size(md);
+
+ if (!dm_table_supports_size_change(t, old_size, size)) {
+ old_map = ERR_PTR(-EINVAL);
+ goto out;
+ }
+
+ set_capacity(md->disk, size);
+
+ ret = dm_table_set_restrictions(t, md->queue, limits);
+ if (ret) {
+ set_capacity(md->disk, old_size);
+ old_map = ERR_PTR(ret);
+ goto out;
+ }
+
/*
* Wipe any geometry if the size of the table changed.
*/
- if (size != dm_get_size(md))
+ if (size != old_size)
memset(&md->geometry, 0, sizeof(md->geometry));
- set_capacity(md->disk, size);
-
dm_table_event_callback(t, event_callback, md);
if (dm_table_request_based(t)) {
@@ -2453,10 +2453,10 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
* requests in the queue may refer to bio from the old bioset,
* so you must walk through the queue to unprep.
*/
- if (!md->mempools) {
+ if (!md->mempools)
md->mempools = t->mempools;
- t->mempools = NULL;
- }
+ else
+ dm_free_md_mempools(t->mempools);
} else {
/*
* The md may already have mempools that need changing.
@@ -2465,14 +2465,8 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
*/
dm_free_md_mempools(md->mempools);
md->mempools = t->mempools;
- t->mempools = NULL;
- }
-
- ret = dm_table_set_restrictions(t, md->queue, limits);
- if (ret) {
- old_map = ERR_PTR(ret);
- goto out;
}
+ t->mempools = NULL;
old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
rcu_assign_pointer(md->map, (void *)t);
@@ -3638,10 +3632,13 @@ static int dm_pr_clear(struct block_device *bdev, u64 key)
struct mapped_device *md = bdev->bd_disk->private_data;
const struct pr_ops *ops;
int r, srcu_idx;
+ bool forward = true;
- r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
+ /* Not a real ioctl, but targets must not interpret non-DM ioctls */
+ r = dm_prepare_ioctl(md, &srcu_idx, &bdev, 0, 0, &forward);
if (r < 0)
goto out;
+ WARN_ON_ONCE(!forward);
ops = bdev->bd_disk->fops->pr_ops;
if (ops && ops->pr_clear)
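
The __bind() rework above pins down an ordering: resize the disk first, then apply the queue limits, and undo the resize if the limits update fails, so capacity and limits are never observed disagreeing. The skeleton, reduced to its three steps (a sketch of the flow, not a drop-in function):

static int bind_resize(struct mapped_device *md, struct dm_table *t,
		       struct queue_limits *limits,
		       sector_t size, sector_t old_size)
{
	int ret;

	set_capacity(md->disk, size);			/* 1: resize */
	ret = dm_table_set_restrictions(t, md->queue, limits);
	if (ret) {
		set_capacity(md->disk, old_size);	/* 2: roll back */
		return ret;
	}
	/* 3: only now wipe stale geometry and install the new map */
	return 0;
}
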
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index a0a8ff119815..245f52b59215 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -58,6 +58,7 @@ void dm_table_event_callback(struct dm_table *t,
void (*fn)(void *), void *context);
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
bool dm_table_has_no_data_devices(struct dm_table *table);
+bool dm_table_is_wildcard(struct dm_table *t);
int dm_calculate_queue_limits(struct dm_table *table,
struct queue_limits *limits);
int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
@@ -72,6 +73,8 @@ struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
struct dm_target *dm_table_get_wildcard_target(struct dm_table *t);
bool dm_table_request_based(struct dm_table *t);
+bool dm_table_supports_size_change(struct dm_table *t, sector_t old_size,
+ sector_t new_size);
void dm_lock_md_type(struct mapped_device *md);
void dm_unlock_md_type(struct mapped_device *md);
@@ -102,6 +105,7 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *lim);
int dm_revalidate_zones(struct dm_table *t, struct request_queue *q);
+void dm_finalize_zone_settings(struct dm_table *t, struct queue_limits *lim);
void dm_zone_endio(struct dm_io *io, struct bio *clone);
#ifdef CONFIG_BLK_DEV_ZONED
int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
@@ -110,12 +114,14 @@ bool dm_is_zone_write(struct mapped_device *md, struct bio *bio);
int dm_zone_get_reset_bitmap(struct mapped_device *md, struct dm_table *t,
sector_t sector, unsigned int nr_zones,
unsigned long *need_reset);
+#define dm_has_zone_plugs(md) ((md)->disk->zone_wplugs_hash != NULL)
#else
#define dm_blk_report_zones NULL
static inline bool dm_is_zone_write(struct mapped_device *md, struct bio *bio)
{
return false;
}
+#define dm_has_zone_plugs(md) false
#endif
/*
diff --git a/drivers/mfd/88pm886.c b/drivers/mfd/88pm886.c
index 891fdce5d8c1..39dd9a818b0f 100644
--- a/drivers/mfd/88pm886.c
+++ b/drivers/mfd/88pm886.c
@@ -16,11 +16,11 @@ static const struct regmap_config pm886_regmap_config = {
.max_register = PM886_REG_RTC_SPARE6,
};
-static struct regmap_irq pm886_regmap_irqs[] = {
+static const struct regmap_irq pm886_regmap_irqs[] = {
REGMAP_IRQ_REG(PM886_IRQ_ONKEY, 0, PM886_INT_ENA1_ONKEY),
};
-static struct regmap_irq_chip pm886_regmap_irq_chip = {
+static const struct regmap_irq_chip pm886_regmap_irq_chip = {
.name = "88pm886",
.irqs = pm886_regmap_irqs,
.num_irqs = ARRAY_SIZE(pm886_regmap_irqs),
@@ -30,11 +30,11 @@ static struct regmap_irq_chip pm886_regmap_irq_chip = {
.unmask_base = PM886_REG_INT_ENA_1,
};
-static struct resource pm886_onkey_resources[] = {
+static const struct resource pm886_onkey_resources[] = {
DEFINE_RES_IRQ_NAMED(PM886_IRQ_ONKEY, "88pm886-onkey"),
};
-static struct mfd_cell pm886_devs[] = {
+static const struct mfd_cell pm886_devs[] = {
MFD_CELL_RES("88pm886-onkey", pm886_onkey_resources),
MFD_CELL_NAME("88pm886-regulator"),
MFD_CELL_NAME("88pm886-rtc"),
@@ -124,7 +124,11 @@ static int pm886_probe(struct i2c_client *client)
if (err)
return dev_err_probe(dev, err, "Failed to register power off handler\n");
- device_init_wakeup(dev, device_property_read_bool(dev, "wakeup-source"));
+ if (device_property_read_bool(dev, "wakeup-source")) {
+ err = devm_device_init_wakeup(dev);
+ if (err)
+ return dev_err_probe(dev, err, "Failed to init wakeup\n");
+ }
return 0;
}
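
Several hunks in this series make the same conversion: device_init_wakeup(dev, true), whose return value was ignored and which needed manual disarming, becomes the managed devm_device_init_wakeup(), checked for failure and disarmed automatically on unbind (drivers keeping the unmanaged call, such as max14577 and max8925 below, add an explicit device_init_wakeup(dev, false) in .remove() instead). A hedged sketch of the managed variant, with an illustrative probe body:

#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/pm_wakeup.h>
#include <linux/property.h>

static int demo_probe(struct i2c_client *i2c)
{
	struct device *dev = &i2c->dev;
	int err;

	/* Only arm wakeup when firmware marked the device as a source. */
	if (device_property_read_bool(dev, "wakeup-source")) {
		err = devm_device_init_wakeup(dev);
		if (err)
			return dev_err_probe(dev, err,
					     "Failed to init wakeup\n");
	}

	/* No matching cleanup needed: devm disables wakeup on unbind. */
	return 0;
}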
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 96992af22565..6fb3768e3d71 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -1312,21 +1312,42 @@ config MFD_RN5T618
functionality of the device.
config MFD_SEC_CORE
- tristate "Samsung Electronics PMIC Series Support"
+ tristate
+ select MFD_CORE
+ select REGMAP_IRQ
+
+config MFD_SEC_ACPM
+ tristate "Samsung Electronics S2MPG1x PMICs"
+ depends on EXYNOS_ACPM_PROTOCOL
+ depends on OF
+ select MFD_SEC_CORE
+ help
+ Support for the Samsung Electronics PMICs with ACPM interface.
+ This is a Power Management IC for mobile applications with buck
+ converters, various LDOs, power meters, RTC, clock outputs, and
+ additional GPIO interfaces.
+ This driver provides common support for accessing the device;
+ additional drivers must be enabled in order to use the functionality
+ of the device.
+
+ To compile this driver as a module, choose M here: the module will be
+ called sec-acpm.
+
+config MFD_SEC_I2C
+ tristate "Samsung Electronics S2MPA/S2MPS1X/S2MPU/S5M series PMICs"
depends on I2C=y
depends on OF
- select MFD_CORE
+ select MFD_SEC_CORE
select REGMAP_I2C
- select REGMAP_IRQ
help
- Support for the Samsung Electronics PMIC devices coming
- usually along with Samsung Exynos SoC chipset.
+ Support for the Samsung Electronics PMIC devices with an I2C interface,
+ usually coming along with the Samsung Exynos SoC chipset.
This driver provides common support for accessing the device,
additional drivers must be enabled in order to use the functionality
- of the device
+ of the device.
To compile this driver as a module, choose M here: the
- module will be called sec-core.
+ module will be called sec-i2c.
Have in mind that important core drivers (like regulators) depend
on this driver so building this as a module might require proper
initial ramdisk or might not boot up as well in certain scenarios.
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 5e5cc279af60..79495f9f3457 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -229,7 +229,10 @@ obj-$(CONFIG_MFD_RK8XX) += rk8xx-core.o
obj-$(CONFIG_MFD_RK8XX_I2C) += rk8xx-i2c.o
obj-$(CONFIG_MFD_RK8XX_SPI) += rk8xx-spi.o
obj-$(CONFIG_MFD_RN5T618) += rn5t618.o
-obj-$(CONFIG_MFD_SEC_CORE) += sec-core.o sec-irq.o
+sec-core-objs := sec-common.o sec-irq.o
+obj-$(CONFIG_MFD_SEC_CORE) += sec-core.o
+obj-$(CONFIG_MFD_SEC_ACPM) += sec-acpm.o
+obj-$(CONFIG_MFD_SEC_I2C) += sec-i2c.o
obj-$(CONFIG_MFD_SYSCON) += syscon.o
obj-$(CONFIG_MFD_LM3533) += lm3533-core.o lm3533-ctrlbank.o
obj-$(CONFIG_MFD_VEXPRESS_SYSREG) += vexpress-sysreg.o
diff --git a/drivers/mfd/aat2870-core.c b/drivers/mfd/aat2870-core.c
index 8ef510e84688..34d66ba9646a 100644
--- a/drivers/mfd/aat2870-core.c
+++ b/drivers/mfd/aat2870-core.c
@@ -320,9 +320,7 @@ static const struct file_operations aat2870_reg_fops = {
static void aat2870_init_debugfs(struct aat2870_data *aat2870)
{
- aat2870->dentry_root = debugfs_create_dir("aat2870", NULL);
-
- debugfs_create_file("regs", 0644, aat2870->dentry_root, aat2870,
+ debugfs_create_file("regs", 0644, aat2870->client->debugfs, aat2870,
&aat2870_reg_fops);
}
diff --git a/drivers/mfd/as3722.c b/drivers/mfd/as3722.c
index 6c0d89b0c7e3..7ab6fcc9c27c 100644
--- a/drivers/mfd/as3722.c
+++ b/drivers/mfd/as3722.c
@@ -394,7 +394,9 @@ static int as3722_i2c_probe(struct i2c_client *i2c)
return ret;
}
- device_init_wakeup(as3722->dev, true);
+ ret = devm_device_init_wakeup(as3722->dev);
+ if (ret)
+ return dev_err_probe(as3722->dev, ret, "Failed to init wakeup\n");
dev_dbg(as3722->dev, "AS3722 core driver initialized successfully\n");
return 0;
diff --git a/drivers/mfd/bcm590xx.c b/drivers/mfd/bcm590xx.c
index 8b56786d85d0..5a8456bbd63f 100644
--- a/drivers/mfd/bcm590xx.c
+++ b/drivers/mfd/bcm590xx.c
@@ -17,6 +17,15 @@
#include <linux/regmap.h>
#include <linux/slab.h>
+/* Under primary I2C address: */
+#define BCM590XX_REG_PMUID 0x1e
+
+#define BCM590XX_REG_PMUREV 0x1f
+#define BCM590XX_PMUREV_DIG_MASK 0xF
+#define BCM590XX_PMUREV_DIG_SHIFT 0
+#define BCM590XX_PMUREV_ANA_MASK 0xF0
+#define BCM590XX_PMUREV_ANA_SHIFT 4
+
static const struct mfd_cell bcm590xx_devs[] = {
{
.name = "bcm590xx-vregs",
@@ -37,6 +46,47 @@ static const struct regmap_config bcm590xx_regmap_config_sec = {
.cache_type = REGCACHE_MAPLE,
};
+/* Map PMU ID value to model name string */
+static const char * const bcm590xx_names[] = {
+ [BCM590XX_PMUID_BCM59054] = "BCM59054",
+ [BCM590XX_PMUID_BCM59056] = "BCM59056",
+};
+
+static int bcm590xx_parse_version(struct bcm590xx *bcm590xx)
+{
+ unsigned int id, rev;
+ int ret;
+
+ /* Get PMU ID and verify that it matches compatible */
+ ret = regmap_read(bcm590xx->regmap_pri, BCM590XX_REG_PMUID, &id);
+ if (ret) {
+ dev_err(bcm590xx->dev, "failed to read PMU ID: %d\n", ret);
+ return ret;
+ }
+
+ if (id != bcm590xx->pmu_id) {
+ dev_err(bcm590xx->dev, "Incorrect ID for %s: expected %x, got %x.\n",
+ bcm590xx_names[bcm590xx->pmu_id], bcm590xx->pmu_id, id);
+ return -ENODEV;
+ }
+
+ /* Get PMU revision and store it in the info struct */
+ ret = regmap_read(bcm590xx->regmap_pri, BCM590XX_REG_PMUREV, &rev);
+ if (ret) {
+ dev_err(bcm590xx->dev, "failed to read PMU revision: %d\n", ret);
+ return ret;
+ }
+
+ bcm590xx->rev_digital = (rev & BCM590XX_PMUREV_DIG_MASK) >> BCM590XX_PMUREV_DIG_SHIFT;
+
+ bcm590xx->rev_analog = (rev & BCM590XX_PMUREV_ANA_MASK) >> BCM590XX_PMUREV_ANA_SHIFT;
+
+ dev_dbg(bcm590xx->dev, "PMU ID 0x%x (%s), revision: digital %d, analog %d",
+ id, bcm590xx_names[id], bcm590xx->rev_digital, bcm590xx->rev_analog);
+
+ return 0;
+}
+
static int bcm590xx_i2c_probe(struct i2c_client *i2c_pri)
{
struct bcm590xx *bcm590xx;
@@ -50,6 +100,8 @@ static int bcm590xx_i2c_probe(struct i2c_client *i2c_pri)
bcm590xx->dev = &i2c_pri->dev;
bcm590xx->i2c_pri = i2c_pri;
+ bcm590xx->pmu_id = (uintptr_t) of_device_get_match_data(bcm590xx->dev);
+
bcm590xx->regmap_pri = devm_regmap_init_i2c(i2c_pri,
&bcm590xx_regmap_config_pri);
if (IS_ERR(bcm590xx->regmap_pri)) {
@@ -76,6 +128,10 @@ static int bcm590xx_i2c_probe(struct i2c_client *i2c_pri)
goto err;
}
+ ret = bcm590xx_parse_version(bcm590xx);
+ if (ret)
+ goto err;
+
ret = devm_mfd_add_devices(&i2c_pri->dev, -1, bcm590xx_devs,
ARRAY_SIZE(bcm590xx_devs), NULL, 0, NULL);
if (ret < 0) {
@@ -91,12 +147,20 @@ err:
}
static const struct of_device_id bcm590xx_of_match[] = {
- { .compatible = "brcm,bcm59056" },
+ {
+ .compatible = "brcm,bcm59054",
+ .data = (void *)BCM590XX_PMUID_BCM59054,
+ },
+ {
+ .compatible = "brcm,bcm59056",
+ .data = (void *)BCM590XX_PMUID_BCM59056,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, bcm590xx_of_match);
static const struct i2c_device_id bcm590xx_i2c_id[] = {
+ { "bcm59054" },
{ "bcm59056" },
{ }
};
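
The bcm590xx change uses a common pattern for multi-variant drivers: an integer ID travels through of_device_id.data as a void pointer and is recovered with a uintptr_t cast in probe. A hedged sketch with hypothetical names:

#include <linux/i2c.h>
#include <linux/mod_devicetable.h>
#include <linux/of_device.h>

enum demo_pmu_id { DEMO_PMUID_A, DEMO_PMUID_B };

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "vendor,chip-a", .data = (void *)DEMO_PMUID_A },
	{ .compatible = "vendor,chip-b", .data = (void *)DEMO_PMUID_B },
	{ }
};

static int demo_probe(struct i2c_client *i2c)
{
	/* The integer survives the void * round-trip via uintptr_t. */
	enum demo_pmu_id id =
		(enum demo_pmu_id)(uintptr_t)of_device_get_match_data(&i2c->dev);

	dev_dbg(&i2c->dev, "matched variant %d\n", id);
	return 0;
}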
diff --git a/drivers/mfd/exynos-lpass.c b/drivers/mfd/exynos-lpass.c
index 6a585173230b..44797001a432 100644
--- a/drivers/mfd/exynos-lpass.c
+++ b/drivers/mfd/exynos-lpass.c
@@ -104,11 +104,22 @@ static const struct regmap_config exynos_lpass_reg_conf = {
.fast_io = true,
};
+static void exynos_lpass_disable_lpass(void *data)
+{
+ struct platform_device *pdev = data;
+ struct exynos_lpass *lpass = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ exynos_lpass_disable(lpass);
+}
+
static int exynos_lpass_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct exynos_lpass *lpass;
void __iomem *base_top;
+ int ret;
lpass = devm_kzalloc(dev, sizeof(*lpass), GFP_KERNEL);
if (!lpass)
@@ -122,8 +133,8 @@ static int exynos_lpass_probe(struct platform_device *pdev)
if (IS_ERR(lpass->sfr0_clk))
return PTR_ERR(lpass->sfr0_clk);
- lpass->top = regmap_init_mmio(dev, base_top,
- &exynos_lpass_reg_conf);
+ lpass->top = devm_regmap_init_mmio(dev, base_top,
+ &exynos_lpass_reg_conf);
if (IS_ERR(lpass->top)) {
dev_err(dev, "LPASS top regmap initialization failed\n");
return PTR_ERR(lpass->top);
@@ -134,18 +145,11 @@ static int exynos_lpass_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
exynos_lpass_enable(lpass);
- return devm_of_platform_populate(dev);
-}
-
-static void exynos_lpass_remove(struct platform_device *pdev)
-{
- struct exynos_lpass *lpass = platform_get_drvdata(pdev);
+ ret = devm_add_action_or_reset(dev, exynos_lpass_disable_lpass, pdev);
+ if (ret)
+ return ret;
- exynos_lpass_disable(lpass);
- pm_runtime_disable(&pdev->dev);
- if (!pm_runtime_status_suspended(&pdev->dev))
- exynos_lpass_disable(lpass);
- regmap_exit(lpass->top);
+ return devm_of_platform_populate(dev);
}
static int __maybe_unused exynos_lpass_suspend(struct device *dev)
@@ -185,7 +189,6 @@ static struct platform_driver exynos_lpass_driver = {
.of_match_table = exynos_lpass_of_match,
},
.probe = exynos_lpass_probe,
- .remove = exynos_lpass_remove,
};
module_platform_driver(exynos_lpass_driver);
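
The exynos-lpass conversion drops .remove() in favor of devm_add_action_or_reset(), which registers teardown during probe and runs it automatically on unbind, and immediately ("or_reset") if registration itself fails. A sketch under those assumptions, with hypothetical hardware helpers standing in for the LPASS enable/disable calls:

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct demo_ctx { bool enabled; };

static void demo_hw_enable(struct demo_ctx *ctx)
{
	ctx->enabled = true;
}

static void demo_hw_disable(void *data)
{
	struct demo_ctx *ctx = data;

	ctx->enabled = false;	/* stands in for exynos_lpass_disable() */
}

static int demo_probe(struct platform_device *pdev)
{
	struct demo_ctx *ctx;

	ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	demo_hw_enable(ctx);
	/* Runs demo_hw_disable() on unbind, or right now on failure. */
	return devm_add_action_or_reset(&pdev->dev, demo_hw_disable, ctx);
}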
diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c
index 6fce79ec2dc6..7e7e8af9af22 100644
--- a/drivers/mfd/max14577.c
+++ b/drivers/mfd/max14577.c
@@ -456,6 +456,7 @@ static void max14577_i2c_remove(struct i2c_client *i2c)
{
struct max14577 *max14577 = i2c_get_clientdata(i2c);
+ device_init_wakeup(max14577->dev, false);
mfd_remove_devices(max14577->dev);
regmap_del_irq_chip(max14577->irq, max14577->irq_data);
if (max14577->dev_type == MAXIM_DEVICE_TYPE_MAX77836)
diff --git a/drivers/mfd/max77541.c b/drivers/mfd/max77541.c
index d77c31c86e43..f91b4f5373ce 100644
--- a/drivers/mfd/max77541.c
+++ b/drivers/mfd/max77541.c
@@ -152,7 +152,7 @@ static int max77541_pmic_setup(struct device *dev)
if (ret)
return dev_err_probe(dev, ret, "Failed to initialize IRQ\n");
- ret = device_init_wakeup(dev, true);
+ ret = devm_device_init_wakeup(dev);
if (ret)
return dev_err_probe(dev, ret, "Unable to init wakeup\n");
diff --git a/drivers/mfd/max77705.c b/drivers/mfd/max77705.c
index 60c457c21d95..6b263bacb8c2 100644
--- a/drivers/mfd/max77705.c
+++ b/drivers/mfd/max77705.c
@@ -131,7 +131,9 @@ static int max77705_i2c_probe(struct i2c_client *i2c)
if (ret)
return dev_err_probe(dev, ret, "Failed to register child devices\n");
- device_init_wakeup(dev, true);
+ ret = devm_device_init_wakeup(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init wakeup\n");
return 0;
}
diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
index 556aea7ec0a0..ab19ff0c7867 100644
--- a/drivers/mfd/max8925-i2c.c
+++ b/drivers/mfd/max8925-i2c.c
@@ -201,6 +201,7 @@ static void max8925_remove(struct i2c_client *client)
struct max8925_chip *chip = i2c_get_clientdata(client);
max8925_device_exit(chip);
+ device_init_wakeup(&client->dev, false);
i2c_unregister_device(chip->adc);
i2c_unregister_device(chip->rtc);
}
diff --git a/drivers/mfd/rohm-bd96801.c b/drivers/mfd/rohm-bd96801.c
index 60ec8db790a7..66fa017ad568 100644
--- a/drivers/mfd/rohm-bd96801.c
+++ b/drivers/mfd/rohm-bd96801.c
@@ -38,108 +38,172 @@
#include <linux/types.h>
#include <linux/mfd/rohm-bd96801.h>
+#include <linux/mfd/rohm-bd96802.h>
#include <linux/mfd/rohm-generic.h>
-static const struct resource regulator_errb_irqs[] = {
- DEFINE_RES_IRQ_NAMED(BD96801_OTP_ERR_STAT, "bd96801-otp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_DBIST_ERR_STAT, "bd96801-dbist-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_EEP_ERR_STAT, "bd96801-eep-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_ABIST_ERR_STAT, "bd96801-abist-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_PRSTB_ERR_STAT, "bd96801-prstb-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_DRMOS1_ERR_STAT, "bd96801-drmoserr1"),
- DEFINE_RES_IRQ_NAMED(BD96801_DRMOS2_ERR_STAT, "bd96801-drmoserr2"),
- DEFINE_RES_IRQ_NAMED(BD96801_SLAVE_ERR_STAT, "bd96801-slave-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_VREF_ERR_STAT, "bd96801-vref-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_TSD_ERR_STAT, "bd96801-tsd"),
- DEFINE_RES_IRQ_NAMED(BD96801_UVLO_ERR_STAT, "bd96801-uvlo-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_OVLO_ERR_STAT, "bd96801-ovlo-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_OSC_ERR_STAT, "bd96801-osc-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_PON_ERR_STAT, "bd96801-pon-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_POFF_ERR_STAT, "bd96801-poff-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_CMD_SHDN_ERR_STAT, "bd96801-cmd-shdn-err"),
+struct bd968xx {
+ const struct resource *errb_irqs;
+ const struct resource *intb_irqs;
+ int num_errb_irqs;
+ int num_intb_irqs;
+ const struct regmap_irq_chip *errb_irq_chip;
+ const struct regmap_irq_chip *intb_irq_chip;
+ const struct regmap_config *regmap_config;
+ struct mfd_cell *cells;
+ int num_cells;
+ int unlock_reg;
+ int unlock_val;
+};
+
+static const struct resource bd96801_reg_errb_irqs[] = {
+ DEFINE_RES_IRQ_NAMED(BD96801_OTP_ERR_STAT, "otp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_DBIST_ERR_STAT, "dbist-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_EEP_ERR_STAT, "eep-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_ABIST_ERR_STAT, "abist-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_PRSTB_ERR_STAT, "prstb-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_DRMOS1_ERR_STAT, "drmoserr1"),
+ DEFINE_RES_IRQ_NAMED(BD96801_DRMOS2_ERR_STAT, "drmoserr2"),
+ DEFINE_RES_IRQ_NAMED(BD96801_SLAVE_ERR_STAT, "slave-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_VREF_ERR_STAT, "vref-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_TSD_ERR_STAT, "tsd"),
+ DEFINE_RES_IRQ_NAMED(BD96801_UVLO_ERR_STAT, "uvlo-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_OVLO_ERR_STAT, "ovlo-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_OSC_ERR_STAT, "osc-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_PON_ERR_STAT, "pon-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_POFF_ERR_STAT, "poff-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_CMD_SHDN_ERR_STAT, "cmd-shdn-err"),
DEFINE_RES_IRQ_NAMED(BD96801_INT_PRSTB_WDT_ERR, "bd96801-prstb-wdt-err"),
DEFINE_RES_IRQ_NAMED(BD96801_INT_CHIP_IF_ERR, "bd96801-chip-if-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_INT_SHDN_ERR_STAT, "bd96801-int-shdn-err"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_PVIN_ERR_STAT, "bd96801-buck1-pvin-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_OVP_ERR_STAT, "bd96801-buck1-ovp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_UVP_ERR_STAT, "bd96801-buck1-uvp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_SHDN_ERR_STAT, "bd96801-buck1-shdn-err"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_PVIN_ERR_STAT, "bd96801-buck2-pvin-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_OVP_ERR_STAT, "bd96801-buck2-ovp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_UVP_ERR_STAT, "bd96801-buck2-uvp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_SHDN_ERR_STAT, "bd96801-buck2-shdn-err"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_PVIN_ERR_STAT, "bd96801-buck3-pvin-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_OVP_ERR_STAT, "bd96801-buck3-ovp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_UVP_ERR_STAT, "bd96801-buck3-uvp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_SHDN_ERR_STAT, "bd96801-buck3-shdn-err"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_PVIN_ERR_STAT, "bd96801-buck4-pvin-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_OVP_ERR_STAT, "bd96801-buck4-ovp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_UVP_ERR_STAT, "bd96801-buck4-uvp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_SHDN_ERR_STAT, "bd96801-buck4-shdn-err"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_LDO5_PVIN_ERR_STAT, "bd96801-ldo5-pvin-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO5_OVP_ERR_STAT, "bd96801-ldo5-ovp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO5_UVP_ERR_STAT, "bd96801-ldo5-uvp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO5_SHDN_ERR_STAT, "bd96801-ldo5-shdn-err"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_LDO6_PVIN_ERR_STAT, "bd96801-ldo6-pvin-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO6_OVP_ERR_STAT, "bd96801-ldo6-ovp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO6_UVP_ERR_STAT, "bd96801-ldo6-uvp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO6_SHDN_ERR_STAT, "bd96801-ldo6-shdn-err"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_LDO7_PVIN_ERR_STAT, "bd96801-ldo7-pvin-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO7_OVP_ERR_STAT, "bd96801-ldo7-ovp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO7_UVP_ERR_STAT, "bd96801-ldo7-uvp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO7_SHDN_ERR_STAT, "bd96801-ldo7-shdn-err"),
-};
-
-static const struct resource regulator_intb_irqs[] = {
- DEFINE_RES_IRQ_NAMED(BD96801_TW_STAT, "bd96801-core-thermal"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_OCPH_STAT, "bd96801-buck1-overcurr-h"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_OCPL_STAT, "bd96801-buck1-overcurr-l"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_OCPN_STAT, "bd96801-buck1-overcurr-n"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_OVD_STAT, "bd96801-buck1-overvolt"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_UVD_STAT, "bd96801-buck1-undervolt"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_TW_CH_STAT, "bd96801-buck1-thermal"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_OCPH_STAT, "bd96801-buck2-overcurr-h"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_OCPL_STAT, "bd96801-buck2-overcurr-l"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_OCPN_STAT, "bd96801-buck2-overcurr-n"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_OVD_STAT, "bd96801-buck2-overvolt"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_UVD_STAT, "bd96801-buck2-undervolt"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_TW_CH_STAT, "bd96801-buck2-thermal"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_OCPH_STAT, "bd96801-buck3-overcurr-h"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_OCPL_STAT, "bd96801-buck3-overcurr-l"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_OCPN_STAT, "bd96801-buck3-overcurr-n"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_OVD_STAT, "bd96801-buck3-overvolt"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_UVD_STAT, "bd96801-buck3-undervolt"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_TW_CH_STAT, "bd96801-buck3-thermal"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_OCPH_STAT, "bd96801-buck4-overcurr-h"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_OCPL_STAT, "bd96801-buck4-overcurr-l"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_OCPN_STAT, "bd96801-buck4-overcurr-n"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_OVD_STAT, "bd96801-buck4-overvolt"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_UVD_STAT, "bd96801-buck4-undervolt"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_TW_CH_STAT, "bd96801-buck4-thermal"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_LDO5_OCPH_STAT, "bd96801-ldo5-overcurr"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO5_OVD_STAT, "bd96801-ldo5-overvolt"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO5_UVD_STAT, "bd96801-ldo5-undervolt"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_LDO6_OCPH_STAT, "bd96801-ldo6-overcurr"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO6_OVD_STAT, "bd96801-ldo6-overvolt"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO6_UVD_STAT, "bd96801-ldo6-undervolt"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_LDO7_OCPH_STAT, "bd96801-ldo7-overcurr"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO7_OVD_STAT, "bd96801-ldo7-overvolt"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO7_UVD_STAT, "bd96801-ldo7-undervolt"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_INT_SHDN_ERR_STAT, "int-shdn-err"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_PVIN_ERR_STAT, "buck1-pvin-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_OVP_ERR_STAT, "buck1-ovp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_UVP_ERR_STAT, "buck1-uvp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_SHDN_ERR_STAT, "buck1-shdn-err"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_PVIN_ERR_STAT, "buck2-pvin-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_OVP_ERR_STAT, "buck2-ovp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_UVP_ERR_STAT, "buck2-uvp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_SHDN_ERR_STAT, "buck2-shdn-err"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_PVIN_ERR_STAT, "buck3-pvin-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_OVP_ERR_STAT, "buck3-ovp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_UVP_ERR_STAT, "buck3-uvp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_SHDN_ERR_STAT, "buck3-shdn-err"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_PVIN_ERR_STAT, "buck4-pvin-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_OVP_ERR_STAT, "buck4-ovp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_UVP_ERR_STAT, "buck4-uvp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_SHDN_ERR_STAT, "buck4-shdn-err"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO5_PVIN_ERR_STAT, "ldo5-pvin-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO5_OVP_ERR_STAT, "ldo5-ovp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO5_UVP_ERR_STAT, "ldo5-uvp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO5_SHDN_ERR_STAT, "ldo5-shdn-err"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO6_PVIN_ERR_STAT, "ldo6-pvin-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO6_OVP_ERR_STAT, "ldo6-ovp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO6_UVP_ERR_STAT, "ldo6-uvp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO6_SHDN_ERR_STAT, "ldo6-shdn-err"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO7_PVIN_ERR_STAT, "ldo7-pvin-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO7_OVP_ERR_STAT, "ldo7-ovp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO7_UVP_ERR_STAT, "ldo7-uvp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO7_SHDN_ERR_STAT, "ldo7-shdn-err"),
+};
+
+static const struct resource bd96802_reg_errb_irqs[] = {
+ DEFINE_RES_IRQ_NAMED(BD96802_OTP_ERR_STAT, "otp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_DBIST_ERR_STAT, "dbist-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_EEP_ERR_STAT, "eep-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_ABIST_ERR_STAT, "abist-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_PRSTB_ERR_STAT, "prstb-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_DRMOS1_ERR_STAT, "drmoserr1"),
+ DEFINE_RES_IRQ_NAMED(BD96802_DRMOS1_ERR_STAT, "drmoserr2"),
+ DEFINE_RES_IRQ_NAMED(BD96802_SLAVE_ERR_STAT, "slave-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_VREF_ERR_STAT, "vref-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_TSD_ERR_STAT, "tsd"),
+ DEFINE_RES_IRQ_NAMED(BD96802_UVLO_ERR_STAT, "uvlo-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_OVLO_ERR_STAT, "ovlo-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_OSC_ERR_STAT, "osc-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_PON_ERR_STAT, "pon-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_POFF_ERR_STAT, "poff-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_CMD_SHDN_ERR_STAT, "cmd-shdn-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_INT_SHDN_ERR_STAT, "int-shdn-err"),
+
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK1_PVIN_ERR_STAT, "buck1-pvin-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK1_OVP_ERR_STAT, "buck1-ovp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK1_UVP_ERR_STAT, "buck1-uvp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK1_SHDN_ERR_STAT, "buck1-shdn-err"),
+
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK2_PVIN_ERR_STAT, "buck2-pvin-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK2_OVP_ERR_STAT, "buck2-ovp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK2_UVP_ERR_STAT, "buck2-uvp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK2_SHDN_ERR_STAT, "buck2-shdn-err"),
+};
+
+static const struct resource bd96801_reg_intb_irqs[] = {
+ DEFINE_RES_IRQ_NAMED(BD96801_TW_STAT, "core-thermal"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_OCPH_STAT, "buck1-overcurr-h"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_OCPL_STAT, "buck1-overcurr-l"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_OCPN_STAT, "buck1-overcurr-n"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_OVD_STAT, "buck1-overvolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_UVD_STAT, "buck1-undervolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_TW_CH_STAT, "buck1-thermal"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_OCPH_STAT, "buck2-overcurr-h"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_OCPL_STAT, "buck2-overcurr-l"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_OCPN_STAT, "buck2-overcurr-n"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_OVD_STAT, "buck2-overvolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_UVD_STAT, "buck2-undervolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_TW_CH_STAT, "buck2-thermal"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_OCPH_STAT, "buck3-overcurr-h"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_OCPL_STAT, "buck3-overcurr-l"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_OCPN_STAT, "buck3-overcurr-n"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_OVD_STAT, "buck3-overvolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_UVD_STAT, "buck3-undervolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_TW_CH_STAT, "buck3-thermal"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_OCPH_STAT, "buck4-overcurr-h"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_OCPL_STAT, "buck4-overcurr-l"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_OCPN_STAT, "buck4-overcurr-n"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_OVD_STAT, "buck4-overvolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_UVD_STAT, "buck4-undervolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_TW_CH_STAT, "buck4-thermal"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO5_OCPH_STAT, "ldo5-overcurr"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO5_OVD_STAT, "ldo5-overvolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO5_UVD_STAT, "ldo5-undervolt"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO6_OCPH_STAT, "ldo6-overcurr"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO6_OVD_STAT, "ldo6-overvolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO6_UVD_STAT, "ldo6-undervolt"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO7_OCPH_STAT, "ldo7-overcurr"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO7_OVD_STAT, "ldo7-overvolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO7_UVD_STAT, "ldo7-undervolt"),
+};
+
+static const struct resource bd96802_reg_intb_irqs[] = {
+ DEFINE_RES_IRQ_NAMED(BD96802_TW_STAT, "core-thermal"),
+
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK1_OCPH_STAT, "buck1-overcurr-h"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK1_OCPL_STAT, "buck1-overcurr-l"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK1_OCPN_STAT, "buck1-overcurr-n"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK1_OVD_STAT, "buck1-overvolt"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK1_UVD_STAT, "buck1-undervolt"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK1_TW_CH_STAT, "buck1-thermal"),
+
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK2_OCPH_STAT, "buck2-overcurr-h"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK2_OCPL_STAT, "buck2-overcurr-l"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK2_OCPN_STAT, "buck2-overcurr-n"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK2_OVD_STAT, "buck2-overvolt"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK2_UVD_STAT, "buck2-undervolt"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK2_TW_CH_STAT, "buck2-thermal"),
};
enum {
@@ -152,6 +216,20 @@ static struct mfd_cell bd96801_cells[] = {
[REGULATOR_CELL] = { .name = "bd96801-regulator", },
};
+static struct mfd_cell bd96802_cells[] = {
+ [WDG_CELL] = { .name = "bd96801-wdt", },
+ [REGULATOR_CELL] = { .name = "bd96802-regulator", },
+};
+static struct mfd_cell bd96805_cells[] = {
+ [WDG_CELL] = { .name = "bd96801-wdt", },
+ [REGULATOR_CELL] = { .name = "bd96805-regulator", },
+};
+
+static struct mfd_cell bd96806_cells[] = {
+ [WDG_CELL] = { .name = "bd96806-wdt", },
+ [REGULATOR_CELL] = { .name = "bd96806-regulator", },
+};
+
static const struct regmap_range bd96801_volatile_ranges[] = {
/* Status registers */
regmap_reg_range(BD96801_REG_WD_FEED, BD96801_REG_WD_FAILCOUNT),
@@ -169,11 +247,28 @@ static const struct regmap_range bd96801_volatile_ranges[] = {
regmap_reg_range(BD96801_LDO5_VOL_LVL_REG, BD96801_LDO7_VOL_LVL_REG),
};
-static const struct regmap_access_table volatile_regs = {
+static const struct regmap_range bd96802_volatile_ranges[] = {
+ /* Status regs */
+ regmap_reg_range(BD96801_REG_WD_FEED, BD96801_REG_WD_FAILCOUNT),
+ regmap_reg_range(BD96801_REG_WD_ASK, BD96801_REG_WD_ASK),
+ regmap_reg_range(BD96801_REG_WD_STATUS, BD96801_REG_WD_STATUS),
+ regmap_reg_range(BD96801_REG_PMIC_STATE, BD96801_REG_INT_BUCK2_ERRB),
+ regmap_reg_range(BD96801_REG_INT_SYS_INTB, BD96801_REG_INT_BUCK2_INTB),
+ /* Registers which do not update value unless PMIC is in STBY */
+ regmap_reg_range(BD96801_REG_SSCG_CTRL, BD96801_REG_SHD_INTB),
+ regmap_reg_range(BD96801_REG_BUCK_OVP, BD96801_REG_BOOT_OVERTIME),
+};
+
+static const struct regmap_access_table bd96801_volatile_regs = {
.yes_ranges = bd96801_volatile_ranges,
.n_yes_ranges = ARRAY_SIZE(bd96801_volatile_ranges),
};
+static const struct regmap_access_table bd96802_volatile_regs = {
+ .yes_ranges = bd96802_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(bd96802_volatile_ranges),
+};
+
/*
* For ERRB we need main register bit mapping as bit(0) indicates active IRQ
* in one of the first 3 sub IRQ registers, For INTB we can use default 1 to 1
@@ -188,7 +283,7 @@ static unsigned int bit5_offsets[] = {7}; /* LDO 5 stat */
static unsigned int bit6_offsets[] = {8}; /* LDO 6 stat */
static unsigned int bit7_offsets[] = {9}; /* LDO 7 stat */
-static const struct regmap_irq_sub_irq_map errb_sub_irq_offsets[] = {
+static const struct regmap_irq_sub_irq_map bd96801_errb_sub_irq_offsets[] = {
REGMAP_IRQ_MAIN_REG_OFFSET(bit0_offsets),
REGMAP_IRQ_MAIN_REG_OFFSET(bit1_offsets),
REGMAP_IRQ_MAIN_REG_OFFSET(bit2_offsets),
@@ -199,6 +294,12 @@ static const struct regmap_irq_sub_irq_map errb_sub_irq_offsets[] = {
REGMAP_IRQ_MAIN_REG_OFFSET(bit7_offsets),
};
+static const struct regmap_irq_sub_irq_map bd96802_errb_sub_irq_offsets[] = {
+ REGMAP_IRQ_MAIN_REG_OFFSET(bit0_offsets),
+ REGMAP_IRQ_MAIN_REG_OFFSET(bit1_offsets),
+ REGMAP_IRQ_MAIN_REG_OFFSET(bit2_offsets),
+};
+
static const struct regmap_irq bd96801_errb_irqs[] = {
/* Reg 0x52 Fatal ERRB1 */
REGMAP_IRQ_REG(BD96801_OTP_ERR_STAT, 0, BD96801_OTP_ERR_MASK),
@@ -259,6 +360,39 @@ static const struct regmap_irq bd96801_errb_irqs[] = {
REGMAP_IRQ_REG(BD96801_LDO7_SHDN_ERR_STAT, 9, BD96801_OUT_SHDN_ERR_MASK),
};
+static const struct regmap_irq bd96802_errb_irqs[] = {
+ /* Reg 0x52 Fatal ERRB1 */
+ REGMAP_IRQ_REG(BD96802_OTP_ERR_STAT, 0, BD96801_OTP_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_DBIST_ERR_STAT, 0, BD96801_DBIST_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_EEP_ERR_STAT, 0, BD96801_EEP_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_ABIST_ERR_STAT, 0, BD96801_ABIST_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_PRSTB_ERR_STAT, 0, BD96801_PRSTB_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_DRMOS1_ERR_STAT, 0, BD96801_DRMOS1_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_DRMOS2_ERR_STAT, 0, BD96801_DRMOS2_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_SLAVE_ERR_STAT, 0, BD96801_SLAVE_ERR_MASK),
+ /* 0x53 Fatal ERRB2 */
+ REGMAP_IRQ_REG(BD96802_VREF_ERR_STAT, 1, BD96801_VREF_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_TSD_ERR_STAT, 1, BD96801_TSD_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_UVLO_ERR_STAT, 1, BD96801_UVLO_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_OVLO_ERR_STAT, 1, BD96801_OVLO_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_OSC_ERR_STAT, 1, BD96801_OSC_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_PON_ERR_STAT, 1, BD96801_PON_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_POFF_ERR_STAT, 1, BD96801_POFF_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_CMD_SHDN_ERR_STAT, 1, BD96801_CMD_SHDN_ERR_MASK),
+ /* 0x54 Fatal INTB shadowed to ERRB */
+ REGMAP_IRQ_REG(BD96802_INT_SHDN_ERR_STAT, 2, BD96801_INT_SHDN_ERR_MASK),
+ /* Reg 0x55 BUCK1 ERR IRQs */
+ REGMAP_IRQ_REG(BD96802_BUCK1_PVIN_ERR_STAT, 3, BD96801_OUT_PVIN_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK1_OVP_ERR_STAT, 3, BD96801_OUT_OVP_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK1_UVP_ERR_STAT, 3, BD96801_OUT_UVP_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK1_SHDN_ERR_STAT, 3, BD96801_OUT_SHDN_ERR_MASK),
+ /* Reg 0x56 BUCK2 ERR IRQs */
+ REGMAP_IRQ_REG(BD96802_BUCK2_PVIN_ERR_STAT, 4, BD96801_OUT_PVIN_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK2_OVP_ERR_STAT, 4, BD96801_OUT_OVP_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK2_UVP_ERR_STAT, 4, BD96801_OUT_UVP_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK2_SHDN_ERR_STAT, 4, BD96801_OUT_SHDN_ERR_MASK),
+};
+
static const struct regmap_irq bd96801_intb_irqs[] = {
/* STATUS SYSTEM INTB */
REGMAP_IRQ_REG(BD96801_TW_STAT, 0, BD96801_TW_STAT_MASK),
@@ -307,6 +441,69 @@ static const struct regmap_irq bd96801_intb_irqs[] = {
REGMAP_IRQ_REG(BD96801_LDO7_UVD_STAT, 7, BD96801_LDO_UVD_STAT_MASK),
};
+static const struct regmap_irq bd96802_intb_irqs[] = {
+ /* STATUS SYSTEM INTB */
+ REGMAP_IRQ_REG(BD96802_TW_STAT, 0, BD96801_TW_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_WDT_ERR_STAT, 0, BD96801_WDT_ERR_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_I2C_ERR_STAT, 0, BD96801_I2C_ERR_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_CHIP_IF_ERR_STAT, 0, BD96801_CHIP_IF_ERR_STAT_MASK),
+ /* STATUS BUCK1 INTB */
+ REGMAP_IRQ_REG(BD96802_BUCK1_OCPH_STAT, 1, BD96801_BUCK_OCPH_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK1_OCPL_STAT, 1, BD96801_BUCK_OCPL_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK1_OCPN_STAT, 1, BD96801_BUCK_OCPN_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK1_OVD_STAT, 1, BD96801_BUCK_OVD_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK1_UVD_STAT, 1, BD96801_BUCK_UVD_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK1_TW_CH_STAT, 1, BD96801_BUCK_TW_CH_STAT_MASK),
+ /* BUCK 2 INTB */
+ REGMAP_IRQ_REG(BD96802_BUCK2_OCPH_STAT, 2, BD96801_BUCK_OCPH_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK2_OCPL_STAT, 2, BD96801_BUCK_OCPL_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK2_OCPN_STAT, 2, BD96801_BUCK_OCPN_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK2_OVD_STAT, 2, BD96801_BUCK_OVD_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK2_UVD_STAT, 2, BD96801_BUCK_UVD_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK2_TW_CH_STAT, 2, BD96801_BUCK_TW_CH_STAT_MASK),
+};
+
+/*
+ * The IRQ stuff is a bit hairy. The BD96801 / BD96802 provide two physical
+ * IRQ lines called INTB and ERRB. They share the same main status register.
+ *
+ * For ERRB, the mapping from main status to sub-status is such that the
+ * 'global' faults are mapped to the first 3 sub-status registers and
+ * indicated by bit[0] in the main status reg.
+ *
+ * The rest of the status registers indicate faults for individual
+ * regulators, 1 sub register / regulator and 1 main status register bit /
+ * regulator, starting from bit[1].
+ *
+ * E.g., regulator-specific faults have a 1-to-1 mapping from main-status
+ * to sub registers, but 'global' ERRB IRQs require mapping from main status
+ * bit[0] to 3 status registers.
+ *
+ * Furthermore, the BD96801 has 7 regulators where the BD96802 has only 2.
+ *
+ * INTB has only 1 sub status register for 'global' events and then its own
+ * sub status register for each of the regulators. So, for INTB we have a
+ * direct 1-to-1 mapping - the BD96801 just has 5 registers and 5 main status
+ * bits more than the BD96802.
+ *
+ * Sharing the main status bits could be a problem if we had both INTB and
+ * ERRB IRQs asserted but for different sub-status offsets. This might lead
+ * the IRQ controller code to read a sub status register which indicates no
+ * active IRQs. I assume this occurring repeatedly might lead the IRQ to be
+ * disabled by the core as a result of repeatedly returned IRQ_NONEs.
+ *
+ * I don't consider this as a fatal problem for now because:
+ * a) Having ERRB asserted leads to PMIC fault state which will kill
+ * the SoC powered by the PMIC. (So, relevant only for potential
+ * case of not powering the processor with this PMIC).
+ * b) Having ERRB set without having respective INTB is unlikely
+ * (haven't actually verified this).
+ *
+ * So, let's proceed with main status enabled for both INTB and ERRB. We can
+ * later disable main-status usage on systems where this ever proves to be
+ * a problem.
+ */
+
static const struct regmap_irq_chip bd96801_irq_chip_errb = {
.name = "bd96801-irq-errb",
.domain_suffix = "errb",
@@ -320,7 +517,23 @@ static const struct regmap_irq_chip bd96801_irq_chip_errb = {
.init_ack_masked = true,
.num_regs = 10,
.irq_reg_stride = 1,
- .sub_reg_offsets = &errb_sub_irq_offsets[0],
+ .sub_reg_offsets = &bd96801_errb_sub_irq_offsets[0],
+};
+
+static const struct regmap_irq_chip bd96802_irq_chip_errb = {
+ .name = "bd96802-irq-errb",
+ .domain_suffix = "errb",
+ .main_status = BD96801_REG_INT_MAIN,
+ .num_main_regs = 1,
+ .irqs = &bd96802_errb_irqs[0],
+ .num_irqs = ARRAY_SIZE(bd96802_errb_irqs),
+ .status_base = BD96801_REG_INT_SYS_ERRB1,
+ .mask_base = BD96801_REG_MASK_SYS_ERRB,
+ .ack_base = BD96801_REG_INT_SYS_ERRB1,
+ .init_ack_masked = true,
+ .num_regs = 5,
+ .irq_reg_stride = 1,
+ .sub_reg_offsets = &bd96802_errb_sub_irq_offsets[0],
};
static const struct regmap_irq_chip bd96801_irq_chip_intb = {
@@ -338,25 +551,124 @@ static const struct regmap_irq_chip bd96801_irq_chip_intb = {
.irq_reg_stride = 1,
};
+static const struct regmap_irq_chip bd96802_irq_chip_intb = {
+ .name = "bd96802-irq-intb",
+ .domain_suffix = "intb",
+ .main_status = BD96801_REG_INT_MAIN,
+ .num_main_regs = 1,
+ .irqs = &bd96802_intb_irqs[0],
+ .num_irqs = ARRAY_SIZE(bd96802_intb_irqs),
+ .status_base = BD96801_REG_INT_SYS_INTB,
+ .mask_base = BD96801_REG_MASK_SYS_INTB,
+ .ack_base = BD96801_REG_INT_SYS_INTB,
+ .init_ack_masked = true,
+ .num_regs = 3,
+ .irq_reg_stride = 1,
+};
+
static const struct regmap_config bd96801_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .volatile_table = &volatile_regs,
+ .volatile_table = &bd96801_volatile_regs,
+ .cache_type = REGCACHE_MAPLE,
+};
+
+static const struct regmap_config bd96802_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .volatile_table = &bd96802_volatile_regs,
.cache_type = REGCACHE_MAPLE,
};
+static const struct bd968xx bd96801_data = {
+ .errb_irqs = bd96801_reg_errb_irqs,
+ .intb_irqs = bd96801_reg_intb_irqs,
+ .num_errb_irqs = ARRAY_SIZE(bd96801_reg_errb_irqs),
+ .num_intb_irqs = ARRAY_SIZE(bd96801_reg_intb_irqs),
+ .errb_irq_chip = &bd96801_irq_chip_errb,
+ .intb_irq_chip = &bd96801_irq_chip_intb,
+ .regmap_config = &bd96801_regmap_config,
+ .cells = bd96801_cells,
+ .num_cells = ARRAY_SIZE(bd96801_cells),
+ .unlock_reg = BD96801_LOCK_REG,
+ .unlock_val = BD96801_UNLOCK,
+};
+
+static const struct bd968xx bd96802_data = {
+ .errb_irqs = bd96802_reg_errb_irqs,
+ .intb_irqs = bd96802_reg_intb_irqs,
+ .num_errb_irqs = ARRAY_SIZE(bd96802_reg_errb_irqs),
+ .num_intb_irqs = ARRAY_SIZE(bd96802_reg_intb_irqs),
+ .errb_irq_chip = &bd96802_irq_chip_errb,
+ .intb_irq_chip = &bd96802_irq_chip_intb,
+ .regmap_config = &bd96802_regmap_config,
+ .cells = bd96802_cells,
+ .num_cells = ARRAY_SIZE(bd96802_cells),
+ .unlock_reg = BD96801_LOCK_REG,
+ .unlock_val = BD96801_UNLOCK,
+};
+
+static const struct bd968xx bd96805_data = {
+ .errb_irqs = bd96801_reg_errb_irqs,
+ .intb_irqs = bd96801_reg_intb_irqs,
+ .num_errb_irqs = ARRAY_SIZE(bd96801_reg_errb_irqs),
+ .num_intb_irqs = ARRAY_SIZE(bd96801_reg_intb_irqs),
+ .errb_irq_chip = &bd96801_irq_chip_errb,
+ .intb_irq_chip = &bd96801_irq_chip_intb,
+ .regmap_config = &bd96801_regmap_config,
+ .cells = bd96805_cells,
+ .num_cells = ARRAY_SIZE(bd96805_cells),
+ .unlock_reg = BD96801_LOCK_REG,
+ .unlock_val = BD96801_UNLOCK,
+};
+
+static const struct bd968xx bd96806_data = {
+ .errb_irqs = bd96802_reg_errb_irqs,
+ .intb_irqs = bd96802_reg_intb_irqs,
+ .num_errb_irqs = ARRAY_SIZE(bd96802_reg_errb_irqs),
+ .num_intb_irqs = ARRAY_SIZE(bd96802_reg_intb_irqs),
+ .errb_irq_chip = &bd96802_irq_chip_errb,
+ .intb_irq_chip = &bd96802_irq_chip_intb,
+ .regmap_config = &bd96802_regmap_config,
+ .cells = bd96806_cells,
+ .num_cells = ARRAY_SIZE(bd96806_cells),
+ .unlock_reg = BD96801_LOCK_REG,
+ .unlock_val = BD96801_UNLOCK,
+};
+
static int bd96801_i2c_probe(struct i2c_client *i2c)
{
struct regmap_irq_chip_data *intb_irq_data, *errb_irq_data;
struct irq_domain *intb_domain, *errb_domain;
+ const struct bd968xx *ddata;
const struct fwnode_handle *fwnode;
struct resource *regulator_res;
struct resource wdg_irq;
struct regmap *regmap;
- int intb_irq, errb_irq, num_intb, num_errb = 0;
+ int intb_irq, errb_irq, num_errb = 0;
int num_regu_irqs, wdg_irq_no;
+ unsigned int chip_type;
int i, ret;
+ chip_type = (unsigned int)(uintptr_t)device_get_match_data(&i2c->dev);
+ switch (chip_type) {
+ case ROHM_CHIP_TYPE_BD96801:
+ ddata = &bd96801_data;
+ break;
+ case ROHM_CHIP_TYPE_BD96802:
+ ddata = &bd96802_data;
+ break;
+ case ROHM_CHIP_TYPE_BD96805:
+ ddata = &bd96805_data;
+ break;
+ case ROHM_CHIP_TYPE_BD96806:
+ ddata = &bd96806_data;
+ break;
+ default:
+ dev_err(&i2c->dev, "Unknown IC\n");
+ return -EINVAL;
+ }
+
fwnode = dev_fwnode(&i2c->dev);
if (!fwnode)
return dev_err_probe(&i2c->dev, -EINVAL, "Failed to find fwnode\n");
@@ -365,34 +677,32 @@ static int bd96801_i2c_probe(struct i2c_client *i2c)
if (intb_irq < 0)
return dev_err_probe(&i2c->dev, intb_irq, "INTB IRQ not configured\n");
- num_intb = ARRAY_SIZE(regulator_intb_irqs);
-
/* ERRB may be omitted if processor is powered by the PMIC */
errb_irq = fwnode_irq_get_byname(fwnode, "errb");
- if (errb_irq < 0)
- errb_irq = 0;
+ if (errb_irq == -EPROBE_DEFER)
+ return errb_irq;
- if (errb_irq)
- num_errb = ARRAY_SIZE(regulator_errb_irqs);
+ if (errb_irq > 0)
+ num_errb = ddata->num_errb_irqs;
- num_regu_irqs = num_intb + num_errb;
+ num_regu_irqs = ddata->num_intb_irqs + num_errb;
regulator_res = devm_kcalloc(&i2c->dev, num_regu_irqs,
sizeof(*regulator_res), GFP_KERNEL);
if (!regulator_res)
return -ENOMEM;
- regmap = devm_regmap_init_i2c(i2c, &bd96801_regmap_config);
+ regmap = devm_regmap_init_i2c(i2c, ddata->regmap_config);
if (IS_ERR(regmap))
return dev_err_probe(&i2c->dev, PTR_ERR(regmap),
"Regmap initialization failed\n");
- ret = regmap_write(regmap, BD96801_LOCK_REG, BD96801_UNLOCK);
+ ret = regmap_write(regmap, ddata->unlock_reg, ddata->unlock_val);
if (ret)
return dev_err_probe(&i2c->dev, ret, "Failed to unlock PMIC\n");
ret = devm_regmap_add_irq_chip(&i2c->dev, regmap, intb_irq,
- IRQF_ONESHOT, 0, &bd96801_irq_chip_intb,
+ IRQF_ONESHOT, 0, ddata->intb_irq_chip,
&intb_irq_data);
if (ret)
return dev_err_probe(&i2c->dev, ret, "Failed to add INTB IRQ chip\n");
@@ -404,24 +714,25 @@ static int bd96801_i2c_probe(struct i2c_client *i2c)
* has two domains so we do IRQ mapping here and provide the
* already mapped IRQ numbers to sub-devices.
*/
- for (i = 0; i < num_intb; i++) {
+ for (i = 0; i < ddata->num_intb_irqs; i++) {
struct resource *res = &regulator_res[i];
- *res = regulator_intb_irqs[i];
+ *res = ddata->intb_irqs[i];
res->start = res->end = irq_create_mapping(intb_domain,
res->start);
}
wdg_irq_no = irq_create_mapping(intb_domain, BD96801_WDT_ERR_STAT);
wdg_irq = DEFINE_RES_IRQ_NAMED(wdg_irq_no, "bd96801-wdg");
- bd96801_cells[WDG_CELL].resources = &wdg_irq;
- bd96801_cells[WDG_CELL].num_resources = 1;
+
+ ddata->cells[WDG_CELL].resources = &wdg_irq;
+ ddata->cells[WDG_CELL].num_resources = 1;
if (!num_errb)
goto skip_errb;
ret = devm_regmap_add_irq_chip(&i2c->dev, regmap, errb_irq, IRQF_ONESHOT,
- 0, &bd96801_irq_chip_errb, &errb_irq_data);
+ 0, ddata->errb_irq_chip, &errb_irq_data);
if (ret)
return dev_err_probe(&i2c->dev, ret,
"Failed to add ERRB IRQ chip\n");
@@ -429,18 +740,17 @@ static int bd96801_i2c_probe(struct i2c_client *i2c)
errb_domain = regmap_irq_get_domain(errb_irq_data);
for (i = 0; i < num_errb; i++) {
- struct resource *res = &regulator_res[num_intb + i];
+ struct resource *res = &regulator_res[ddata->num_intb_irqs + i];
- *res = regulator_errb_irqs[i];
+ *res = ddata->errb_irqs[i];
res->start = res->end = irq_create_mapping(errb_domain, res->start);
}
skip_errb:
- bd96801_cells[REGULATOR_CELL].resources = regulator_res;
- bd96801_cells[REGULATOR_CELL].num_resources = num_regu_irqs;
-
- ret = devm_mfd_add_devices(&i2c->dev, PLATFORM_DEVID_AUTO, bd96801_cells,
- ARRAY_SIZE(bd96801_cells), NULL, 0, NULL);
+ ddata->cells[REGULATOR_CELL].resources = regulator_res;
+ ddata->cells[REGULATOR_CELL].num_resources = num_regu_irqs;
+ ret = devm_mfd_add_devices(&i2c->dev, PLATFORM_DEVID_AUTO, ddata->cells,
+ ddata->num_cells, NULL, 0, NULL);
if (ret)
dev_err_probe(&i2c->dev, ret, "Failed to create subdevices\n");
@@ -448,7 +758,10 @@ skip_errb:
}
static const struct of_device_id bd96801_of_match[] = {
- { .compatible = "rohm,bd96801", },
+ { .compatible = "rohm,bd96801", .data = (void *)ROHM_CHIP_TYPE_BD96801 },
+ { .compatible = "rohm,bd96802", .data = (void *)ROHM_CHIP_TYPE_BD96802 },
+ { .compatible = "rohm,bd96805", .data = (void *)ROHM_CHIP_TYPE_BD96805 },
+ { .compatible = "rohm,bd96806", .data = (void *)ROHM_CHIP_TYPE_BD96806 },
{ }
};
MODULE_DEVICE_TABLE(of, bd96801_of_match);
@@ -476,5 +789,5 @@ static void __exit bd96801_i2c_exit(void)
module_exit(bd96801_i2c_exit);
MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
-MODULE_DESCRIPTION("ROHM BD96801 Power Management IC driver");
+MODULE_DESCRIPTION("ROHM BD9680X Power Management IC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/rt5033.c b/drivers/mfd/rt5033.c
index 84ebc96f58e4..2204bf1c5a51 100644
--- a/drivers/mfd/rt5033.c
+++ b/drivers/mfd/rt5033.c
@@ -98,7 +98,11 @@ static int rt5033_i2c_probe(struct i2c_client *i2c)
return ret;
}
- device_init_wakeup(rt5033->dev, rt5033->wakeup);
+ if (rt5033->wakeup) {
+ ret = devm_device_init_wakeup(rt5033->dev);
+ if (ret)
+ return dev_err_probe(rt5033->dev, ret, "Failed to init wakeup\n");
+ }
return 0;
}
diff --git a/drivers/mfd/sec-acpm.c b/drivers/mfd/sec-acpm.c
new file mode 100644
index 000000000000..8b31c816d65b
--- /dev/null
+++ b/drivers/mfd/sec-acpm.c
@@ -0,0 +1,442 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2020 Google Inc
+ * Copyright 2025 Linaro Ltd.
+ *
+ * Samsung S2MPG1x ACPM driver
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/firmware/samsung/exynos-acpm-protocol.h>
+#include <linux/mfd/samsung/core.h>
+#include <linux/mfd/samsung/rtc.h>
+#include <linux/mfd/samsung/s2mpg10.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include "sec-core.h"
+
+#define ACPM_ADDR_BITS 8
+#define ACPM_MAX_BULK_DATA 8
+
+struct sec_pmic_acpm_platform_data {
+ int device_type;
+
+ unsigned int acpm_chan_id;
+ u8 speedy_channel;
+
+ const struct regmap_config *regmap_cfg_common;
+ const struct regmap_config *regmap_cfg_pmic;
+ const struct regmap_config *regmap_cfg_rtc;
+ const struct regmap_config *regmap_cfg_meter;
+};
+
+static const struct regmap_range s2mpg10_common_registers[] = {
+ regmap_reg_range(0x00, 0x02), /* CHIP_ID_M, INT, INT_MASK */
+ regmap_reg_range(0x0a, 0x0c), /* Speedy control */
+ regmap_reg_range(0x1a, 0x2a), /* Debug */
+};
+
+static const struct regmap_range s2mpg10_common_ro_registers[] = {
+ regmap_reg_range(0x00, 0x01), /* CHIP_ID_M, INT */
+ regmap_reg_range(0x28, 0x2a), /* Debug */
+};
+
+static const struct regmap_range s2mpg10_common_nonvolatile_registers[] = {
+ regmap_reg_range(0x00, 0x00), /* CHIP_ID_M */
+ regmap_reg_range(0x02, 0x02), /* INT_MASK */
+ regmap_reg_range(0x0a, 0x0c), /* Speedy control */
+};
+
+static const struct regmap_range s2mpg10_common_precious_registers[] = {
+ regmap_reg_range(0x01, 0x01), /* INT */
+};
+
+static const struct regmap_access_table s2mpg10_common_wr_table = {
+ .yes_ranges = s2mpg10_common_registers,
+ .n_yes_ranges = ARRAY_SIZE(s2mpg10_common_registers),
+ .no_ranges = s2mpg10_common_ro_registers,
+ .n_no_ranges = ARRAY_SIZE(s2mpg10_common_ro_registers),
+};
+
+static const struct regmap_access_table s2mpg10_common_rd_table = {
+ .yes_ranges = s2mpg10_common_registers,
+ .n_yes_ranges = ARRAY_SIZE(s2mpg10_common_registers),
+};
+
+static const struct regmap_access_table s2mpg10_common_volatile_table = {
+ .no_ranges = s2mpg10_common_nonvolatile_registers,
+ .n_no_ranges = ARRAY_SIZE(s2mpg10_common_nonvolatile_registers),
+};
+
+static const struct regmap_access_table s2mpg10_common_precious_table = {
+ .yes_ranges = s2mpg10_common_precious_registers,
+ .n_yes_ranges = ARRAY_SIZE(s2mpg10_common_precious_registers),
+};
+
+static const struct regmap_config s2mpg10_regmap_config_common = {
+ .name = "common",
+ .reg_bits = ACPM_ADDR_BITS,
+ .val_bits = 8,
+ .max_register = S2MPG10_COMMON_SPD_DEBUG4,
+ .wr_table = &s2mpg10_common_wr_table,
+ .rd_table = &s2mpg10_common_rd_table,
+ .volatile_table = &s2mpg10_common_volatile_table,
+ .precious_table = &s2mpg10_common_precious_table,
+ .num_reg_defaults_raw = S2MPG10_COMMON_SPD_DEBUG4 + 1,
+ .cache_type = REGCACHE_FLAT,
+};
+
+static const struct regmap_range s2mpg10_pmic_registers[] = {
+ regmap_reg_range(0x00, 0xf6), /* All PMIC registers */
+};
+
+static const struct regmap_range s2mpg10_pmic_ro_registers[] = {
+ regmap_reg_range(0x00, 0x05), /* INTx */
+ regmap_reg_range(0x0c, 0x0f), /* STATUSx PWRONSRC OFFSRC */
+ regmap_reg_range(0xc7, 0xc7), /* GPIO input */
+};
+
+static const struct regmap_range s2mpg10_pmic_nonvolatile_registers[] = {
+ regmap_reg_range(0x06, 0x0b), /* INTxM */
+};
+
+static const struct regmap_range s2mpg10_pmic_precious_registers[] = {
+ regmap_reg_range(0x00, 0x05), /* INTx */
+};
+
+static const struct regmap_access_table s2mpg10_pmic_wr_table = {
+ .yes_ranges = s2mpg10_pmic_registers,
+ .n_yes_ranges = ARRAY_SIZE(s2mpg10_pmic_registers),
+ .no_ranges = s2mpg10_pmic_ro_registers,
+ .n_no_ranges = ARRAY_SIZE(s2mpg10_pmic_ro_registers),
+};
+
+static const struct regmap_access_table s2mpg10_pmic_rd_table = {
+ .yes_ranges = s2mpg10_pmic_registers,
+ .n_yes_ranges = ARRAY_SIZE(s2mpg10_pmic_registers),
+};
+
+static const struct regmap_access_table s2mpg10_pmic_volatile_table = {
+ .no_ranges = s2mpg10_pmic_nonvolatile_registers,
+ .n_no_ranges = ARRAY_SIZE(s2mpg10_pmic_nonvolatile_registers),
+};
+
+static const struct regmap_access_table s2mpg10_pmic_precious_table = {
+ .yes_ranges = s2mpg10_pmic_precious_registers,
+ .n_yes_ranges = ARRAY_SIZE(s2mpg10_pmic_precious_registers),
+};
+
+static const struct regmap_config s2mpg10_regmap_config_pmic = {
+ .name = "pmic",
+ .reg_bits = ACPM_ADDR_BITS,
+ .val_bits = 8,
+ .max_register = S2MPG10_PMIC_LDO_SENSE4,
+ .wr_table = &s2mpg10_pmic_wr_table,
+ .rd_table = &s2mpg10_pmic_rd_table,
+ .volatile_table = &s2mpg10_pmic_volatile_table,
+ .precious_table = &s2mpg10_pmic_precious_table,
+ .num_reg_defaults_raw = S2MPG10_PMIC_LDO_SENSE4 + 1,
+ .cache_type = REGCACHE_FLAT,
+};
+
+static const struct regmap_range s2mpg10_rtc_registers[] = {
+ regmap_reg_range(0x00, 0x2b), /* All RTC registers */
+};
+
+static const struct regmap_range s2mpg10_rtc_volatile_registers[] = {
+ regmap_reg_range(0x01, 0x01), /* RTC_UPDATE */
+ regmap_reg_range(0x05, 0x0c), /* Time / date */
+};
+
+static const struct regmap_access_table s2mpg10_rtc_rd_table = {
+ .yes_ranges = s2mpg10_rtc_registers,
+ .n_yes_ranges = ARRAY_SIZE(s2mpg10_rtc_registers),
+};
+
+static const struct regmap_access_table s2mpg10_rtc_volatile_table = {
+ .yes_ranges = s2mpg10_rtc_volatile_registers,
+ .n_yes_ranges = ARRAY_SIZE(s2mpg10_rtc_volatile_registers),
+};
+
+static const struct regmap_config s2mpg10_regmap_config_rtc = {
+ .name = "rtc",
+ .reg_bits = ACPM_ADDR_BITS,
+ .val_bits = 8,
+ .max_register = S2MPG10_RTC_OSC_CTRL,
+ .rd_table = &s2mpg10_rtc_rd_table,
+ .volatile_table = &s2mpg10_rtc_volatile_table,
+ .num_reg_defaults_raw = S2MPG10_RTC_OSC_CTRL + 1,
+ .cache_type = REGCACHE_FLAT,
+};
+
+static const struct regmap_range s2mpg10_meter_registers[] = {
+ regmap_reg_range(0x00, 0x21), /* Meter config */
+ regmap_reg_range(0x40, 0x8a), /* Meter data */
+ regmap_reg_range(0xee, 0xee), /* Offset */
+ regmap_reg_range(0xf1, 0xf1), /* Trim */
+};
+
+static const struct regmap_range s2mpg10_meter_ro_registers[] = {
+ regmap_reg_range(0x40, 0x8a), /* Meter data */
+};
+
+static const struct regmap_access_table s2mpg10_meter_wr_table = {
+ .yes_ranges = s2mpg10_meter_registers,
+ .n_yes_ranges = ARRAY_SIZE(s2mpg10_meter_registers),
+ .no_ranges = s2mpg10_meter_ro_registers,
+ .n_no_ranges = ARRAY_SIZE(s2mpg10_meter_ro_registers),
+};
+
+static const struct regmap_access_table s2mpg10_meter_rd_table = {
+ .yes_ranges = s2mpg10_meter_registers,
+ .n_yes_ranges = ARRAY_SIZE(s2mpg10_meter_registers),
+};
+
+static const struct regmap_access_table s2mpg10_meter_volatile_table = {
+ .yes_ranges = s2mpg10_meter_ro_registers,
+ .n_yes_ranges = ARRAY_SIZE(s2mpg10_meter_ro_registers),
+};
+
+static const struct regmap_config s2mpg10_regmap_config_meter = {
+ .name = "meter",
+ .reg_bits = ACPM_ADDR_BITS,
+ .val_bits = 8,
+ .max_register = S2MPG10_METER_BUCK_METER_TRIM3,
+ .wr_table = &s2mpg10_meter_wr_table,
+ .rd_table = &s2mpg10_meter_rd_table,
+ .volatile_table = &s2mpg10_meter_volatile_table,
+ .num_reg_defaults_raw = S2MPG10_METER_BUCK_METER_TRIM3 + 1,
+ .cache_type = REGCACHE_FLAT,
+};
+
+struct sec_pmic_acpm_shared_bus_context {
+ const struct acpm_handle *acpm;
+ unsigned int acpm_chan_id;
+ u8 speedy_channel;
+};
+
+enum sec_pmic_acpm_accesstype {
+ SEC_PMIC_ACPM_ACCESSTYPE_COMMON = 0x00,
+ SEC_PMIC_ACPM_ACCESSTYPE_PMIC = 0x01,
+ SEC_PMIC_ACPM_ACCESSTYPE_RTC = 0x02,
+ SEC_PMIC_ACPM_ACCESSTYPE_METER = 0x0a,
+ SEC_PMIC_ACPM_ACCESSTYPE_WLWP = 0x0b,
+ SEC_PMIC_ACPM_ACCESSTYPE_TRIM = 0x0f,
+};
+
+struct sec_pmic_acpm_bus_context {
+ struct sec_pmic_acpm_shared_bus_context *shared;
+ enum sec_pmic_acpm_accesstype type;
+};
+
+static int sec_pmic_acpm_bus_write(void *context, const void *data,
+ size_t count)
+{
+ struct sec_pmic_acpm_bus_context *ctx = context;
+ const struct acpm_handle *acpm = ctx->shared->acpm;
+ const struct acpm_pmic_ops *pmic_ops = &acpm->ops.pmic_ops;
+ size_t val_count = count - BITS_TO_BYTES(ACPM_ADDR_BITS);
+ const u8 *d = data;
+ const u8 *vals = &d[BITS_TO_BYTES(ACPM_ADDR_BITS)];
+ u8 reg;
+
+ if (val_count < 1 || val_count > ACPM_MAX_BULK_DATA)
+ return -EINVAL;
+
+ reg = d[0];
+
+ return pmic_ops->bulk_write(acpm, ctx->shared->acpm_chan_id, ctx->type, reg,
+ ctx->shared->speedy_channel, val_count, vals);
+}
+
+static int sec_pmic_acpm_bus_read(void *context, const void *reg_buf, size_t reg_size,
+ void *val_buf, size_t val_size)
+{
+ struct sec_pmic_acpm_bus_context *ctx = context;
+ const struct acpm_handle *acpm = ctx->shared->acpm;
+ const struct acpm_pmic_ops *pmic_ops = &acpm->ops.pmic_ops;
+ const u8 *r = reg_buf;
+ u8 reg;
+
+ if (reg_size != BITS_TO_BYTES(ACPM_ADDR_BITS) || !val_size ||
+ val_size > ACPM_MAX_BULK_DATA)
+ return -EINVAL;
+
+ reg = r[0];
+
+ return pmic_ops->bulk_read(acpm, ctx->shared->acpm_chan_id, ctx->type, reg,
+ ctx->shared->speedy_channel, val_size, val_buf);
+}
+
+static int sec_pmic_acpm_bus_reg_update_bits(void *context, unsigned int reg, unsigned int mask,
+ unsigned int val)
+{
+ struct sec_pmic_acpm_bus_context *ctx = context;
+ const struct acpm_handle *acpm = ctx->shared->acpm;
+ const struct acpm_pmic_ops *pmic_ops = &acpm->ops.pmic_ops;
+
+ return pmic_ops->update_reg(acpm, ctx->shared->acpm_chan_id, ctx->type, reg & 0xff,
+ ctx->shared->speedy_channel, val, mask);
+}
+
+static const struct regmap_bus sec_pmic_acpm_regmap_bus = {
+ .write = sec_pmic_acpm_bus_write,
+ .read = sec_pmic_acpm_bus_read,
+ .reg_update_bits = sec_pmic_acpm_bus_reg_update_bits,
+ .max_raw_read = ACPM_MAX_BULK_DATA,
+ .max_raw_write = ACPM_MAX_BULK_DATA,
+};
+
+static struct regmap *sec_pmic_acpm_regmap_init(struct device *dev,
+ struct sec_pmic_acpm_shared_bus_context *shared_ctx,
+ enum sec_pmic_acpm_accesstype type,
+ const struct regmap_config *cfg, bool do_attach)
+{
+ struct sec_pmic_acpm_bus_context *ctx;
+ struct regmap *regmap;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ ctx->shared = shared_ctx;
+ ctx->type = type;
+
+ regmap = devm_regmap_init(dev, &sec_pmic_acpm_regmap_bus, ctx, cfg);
+ if (IS_ERR(regmap))
+ return dev_err_cast_probe(dev, regmap, "regmap init (%s) failed\n", cfg->name);
+
+ if (do_attach) {
+ int ret;
+
+ ret = regmap_attach_dev(dev, regmap, cfg);
+ if (ret)
+ return dev_err_ptr_probe(dev, ret, "regmap attach (%s) failed\n",
+ cfg->name);
+ }
+
+ return regmap;
+}
+
+static void sec_pmic_acpm_mask_common_irqs(void *regmap_common)
+{
+ regmap_write(regmap_common, S2MPG10_COMMON_INT_MASK, S2MPG10_COMMON_INT_SRC);
+}
+
+static int sec_pmic_acpm_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap_common, *regmap_pmic, *regmap;
+ const struct sec_pmic_acpm_platform_data *pdata;
+ struct sec_pmic_acpm_shared_bus_context *shared_ctx;
+ const struct acpm_handle *acpm;
+ struct device *dev = &pdev->dev;
+ int ret, irq;
+
+ pdata = device_get_match_data(dev);
+ if (!pdata)
+ return dev_err_probe(dev, -ENODEV, "unsupported device type\n");
+
+ acpm = devm_acpm_get_by_node(dev, dev->parent->of_node);
+ if (IS_ERR(acpm))
+ return dev_err_probe(dev, PTR_ERR(acpm), "failed to get acpm\n");
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ shared_ctx = devm_kzalloc(dev, sizeof(*shared_ctx), GFP_KERNEL);
+ if (!shared_ctx)
+ return -ENOMEM;
+
+ shared_ctx->acpm = acpm;
+ shared_ctx->acpm_chan_id = pdata->acpm_chan_id;
+ shared_ctx->speedy_channel = pdata->speedy_channel;
+
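+ /*
+ * Bring up the 'common' block regmap first so that its interrupts can
+ * be masked before the remaining blocks are initialised.
+ */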
+ regmap_common = sec_pmic_acpm_regmap_init(dev, shared_ctx, SEC_PMIC_ACPM_ACCESSTYPE_COMMON,
+ pdata->regmap_cfg_common, false);
+ if (IS_ERR(regmap_common))
+ return PTR_ERR(regmap_common);
+
+ /* Mask all interrupts from the 'common' block until init has completed */
+ ret = regmap_write(regmap_common, S2MPG10_COMMON_INT_MASK, S2MPG10_COMMON_INT_SRC);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to mask common block interrupts\n");
+
+ regmap_pmic = sec_pmic_acpm_regmap_init(dev, shared_ctx, SEC_PMIC_ACPM_ACCESSTYPE_PMIC,
+ pdata->regmap_cfg_pmic, false);
+ if (IS_ERR(regmap_pmic))
+ return PTR_ERR(regmap_pmic);
+
+ regmap = sec_pmic_acpm_regmap_init(dev, shared_ctx, SEC_PMIC_ACPM_ACCESSTYPE_RTC,
+ pdata->regmap_cfg_rtc, true);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ regmap = sec_pmic_acpm_regmap_init(dev, shared_ctx, SEC_PMIC_ACPM_ACCESSTYPE_METER,
+ pdata->regmap_cfg_meter, true);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ret = sec_pmic_probe(dev, pdata->device_type, irq, regmap_pmic, NULL);
+ if (ret)
+ return ret;
+
+ if (device_property_read_bool(dev, "wakeup-source")) {
+ ret = devm_device_init_wakeup(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to init wakeup\n");
+ }
+
+ /* Unmask PMIC interrupt from 'common' block, now that everything is in place. */
+ ret = regmap_clear_bits(regmap_common, S2MPG10_COMMON_INT_MASK,
+ S2MPG10_COMMON_INT_SRC_PMIC);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to unmask PMIC interrupt\n");
+
+ /* Mask all interrupts from the 'common' block again on teardown */
+ ret = devm_add_action_or_reset(dev, sec_pmic_acpm_mask_common_irqs, regmap_common);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void sec_pmic_acpm_shutdown(struct platform_device *pdev)
+{
+ sec_pmic_shutdown(&pdev->dev);
+}
+
+static const struct sec_pmic_acpm_platform_data s2mpg10_data = {
+ .device_type = S2MPG10,
+ .acpm_chan_id = 2,
+ .speedy_channel = 0,
+ .regmap_cfg_common = &s2mpg10_regmap_config_common,
+ .regmap_cfg_pmic = &s2mpg10_regmap_config_pmic,
+ .regmap_cfg_rtc = &s2mpg10_regmap_config_rtc,
+ .regmap_cfg_meter = &s2mpg10_regmap_config_meter,
+};
+
+static const struct of_device_id sec_pmic_acpm_of_match[] = {
+ { .compatible = "samsung,s2mpg10-pmic", .data = &s2mpg10_data, },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sec_pmic_acpm_of_match);
+
+static struct platform_driver sec_pmic_acpm_driver = {
+ .driver = {
+ .name = "sec-pmic-acpm",
+ .pm = pm_sleep_ptr(&sec_pmic_pm_ops),
+ .of_match_table = sec_pmic_acpm_of_match,
+ },
+ .probe = sec_pmic_acpm_probe,
+ .shutdown = sec_pmic_acpm_shutdown,
+};
+module_platform_driver(sec_pmic_acpm_driver);
+
+MODULE_AUTHOR("André Draszik <andre.draszik@linaro.org>");
+MODULE_DESCRIPTION("ACPM driver for the Samsung S2MPG1x");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/sec-common.c b/drivers/mfd/sec-common.c
new file mode 100644
index 000000000000..42d55e70e34c
--- /dev/null
+++ b/drivers/mfd/sec-common.c
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2012 Samsung Electronics Co., Ltd
+ * http://www.samsung.com
+ * Copyright 2025 Linaro Ltd.
+ *
+ * Samsung SxM core driver
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/samsung/core.h>
+#include <linux/mfd/samsung/irq.h>
+#include <linux/mfd/samsung/s2mps11.h>
+#include <linux/mfd/samsung/s2mps13.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include "sec-core.h"
+
+static const struct mfd_cell s5m8767_devs[] = {
+ MFD_CELL_NAME("s5m8767-pmic"),
+ MFD_CELL_NAME("s5m-rtc"),
+ MFD_CELL_OF("s5m8767-clk", NULL, NULL, 0, 0, "samsung,s5m8767-clk"),
+};
+
+static const struct mfd_cell s2dos05_devs[] = {
+ MFD_CELL_NAME("s2dos05-regulator"),
+};
+
+static const struct mfd_cell s2mpg10_devs[] = {
+ MFD_CELL_NAME("s2mpg10-meter"),
+ MFD_CELL_NAME("s2mpg10-regulator"),
+ MFD_CELL_NAME("s2mpg10-rtc"),
+ MFD_CELL_OF("s2mpg10-clk", NULL, NULL, 0, 0, "samsung,s2mpg10-clk"),
+ MFD_CELL_OF("s2mpg10-gpio", NULL, NULL, 0, 0, "samsung,s2mpg10-gpio"),
+};
+
+static const struct mfd_cell s2mps11_devs[] = {
+ MFD_CELL_NAME("s2mps11-regulator"),
+ MFD_CELL_NAME("s2mps14-rtc"),
+ MFD_CELL_OF("s2mps11-clk", NULL, NULL, 0, 0, "samsung,s2mps11-clk"),
+};
+
+static const struct mfd_cell s2mps13_devs[] = {
+ MFD_CELL_NAME("s2mps13-regulator"),
+ MFD_CELL_NAME("s2mps13-rtc"),
+ MFD_CELL_OF("s2mps13-clk", NULL, NULL, 0, 0, "samsung,s2mps13-clk"),
+};
+
+static const struct mfd_cell s2mps14_devs[] = {
+ MFD_CELL_NAME("s2mps14-regulator"),
+ MFD_CELL_NAME("s2mps14-rtc"),
+ MFD_CELL_OF("s2mps14-clk", NULL, NULL, 0, 0, "samsung,s2mps14-clk"),
+};
+
+static const struct mfd_cell s2mps15_devs[] = {
+ MFD_CELL_NAME("s2mps15-regulator"),
+ MFD_CELL_NAME("s2mps15-rtc"),
+ MFD_CELL_OF("s2mps13-clk", NULL, NULL, 0, 0, "samsung,s2mps13-clk"),
+};
+
+static const struct mfd_cell s2mpa01_devs[] = {
+ MFD_CELL_NAME("s2mpa01-pmic"),
+ MFD_CELL_NAME("s2mps14-rtc"),
+};
+
+static const struct mfd_cell s2mpu02_devs[] = {
+ MFD_CELL_NAME("s2mpu02-regulator"),
+};
+
+static const struct mfd_cell s2mpu05_devs[] = {
+ MFD_CELL_NAME("s2mpu05-regulator"),
+ MFD_CELL_NAME("s2mps15-rtc"),
+};
+
+static void sec_pmic_dump_rev(struct sec_pmic_dev *sec_pmic)
+{
+ unsigned int val;
+
+ /* For s2mpg1x, the revision is in a different regmap */
+ if (sec_pmic->device_type == S2MPG10)
+ return;
+
+ /* For each device type, the REG_ID is always the first register */
+ if (!regmap_read(sec_pmic->regmap_pmic, S2MPS11_REG_ID, &val))
+ dev_dbg(sec_pmic->dev, "Revision: 0x%x\n", val);
+}
+
+static void sec_pmic_configure(struct sec_pmic_dev *sec_pmic)
+{
+ int err;
+
+ if (sec_pmic->device_type != S2MPS13X)
+ return;
+
+ if (sec_pmic->pdata->disable_wrstbi) {
+ /*
+ * If WRSTBI pin is pulled down this feature must be disabled
+ * because each Suspend to RAM will trigger buck voltage reset
+ * to default values.
+ */
+ err = regmap_update_bits(sec_pmic->regmap_pmic,
+ S2MPS13_REG_WRSTBI,
+ S2MPS13_REG_WRSTBI_MASK, 0x0);
+ if (err)
+ dev_warn(sec_pmic->dev,
+ "Cannot initialize WRSTBI config: %d\n",
+ err);
+ }
+}
+
+/*
+ * Only the common platform data elements for s5m8767 are parsed here from the
+ * device tree. Other sub-modules of s5m8767 such as pmic, rtc, charger and
+ * others have to parse their own platform data elements from device tree.
+ *
+ * The s5m8767 platform data structure is instantiated here and the drivers for
+ * the sub-modules need not instantiate another instance while parsing their
+ * platform data.
+ */
+static struct sec_platform_data *
+sec_pmic_parse_dt_pdata(struct device *dev)
+{
+ struct sec_platform_data *pd;
+
+ pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
+
+ pd->manual_poweroff = of_property_read_bool(dev->of_node,
+ "samsung,s2mps11-acokb-ground");
+ pd->disable_wrstbi = of_property_read_bool(dev->of_node,
+ "samsung,s2mps11-wrstbi-ground");
+ return pd;
+}
+
+int sec_pmic_probe(struct device *dev, int device_type, unsigned int irq,
+ struct regmap *regmap, struct i2c_client *client)
+{
+ struct sec_platform_data *pdata;
+ const struct mfd_cell *sec_devs;
+ struct sec_pmic_dev *sec_pmic;
+ int ret, num_sec_devs;
+
+ sec_pmic = devm_kzalloc(dev, sizeof(*sec_pmic), GFP_KERNEL);
+ if (!sec_pmic)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, sec_pmic);
+ sec_pmic->dev = dev;
+ sec_pmic->device_type = device_type;
+ sec_pmic->i2c = client;
+ sec_pmic->irq = irq;
+ sec_pmic->regmap_pmic = regmap;
+
+ pdata = sec_pmic_parse_dt_pdata(sec_pmic->dev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+
+ sec_pmic->pdata = pdata;
+
+ ret = sec_irq_init(sec_pmic);
+ if (ret)
+ return ret;
+
+ pm_runtime_set_active(sec_pmic->dev);
+
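+ /* Pick the set of MFD child devices matching this PMIC variant */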
+ switch (sec_pmic->device_type) {
+ case S5M8767X:
+ sec_devs = s5m8767_devs;
+ num_sec_devs = ARRAY_SIZE(s5m8767_devs);
+ break;
+ case S2DOS05:
+ sec_devs = s2dos05_devs;
+ num_sec_devs = ARRAY_SIZE(s2dos05_devs);
+ break;
+ case S2MPA01:
+ sec_devs = s2mpa01_devs;
+ num_sec_devs = ARRAY_SIZE(s2mpa01_devs);
+ break;
+ case S2MPG10:
+ sec_devs = s2mpg10_devs;
+ num_sec_devs = ARRAY_SIZE(s2mpg10_devs);
+ break;
+ case S2MPS11X:
+ sec_devs = s2mps11_devs;
+ num_sec_devs = ARRAY_SIZE(s2mps11_devs);
+ break;
+ case S2MPS13X:
+ sec_devs = s2mps13_devs;
+ num_sec_devs = ARRAY_SIZE(s2mps13_devs);
+ break;
+ case S2MPS14X:
+ sec_devs = s2mps14_devs;
+ num_sec_devs = ARRAY_SIZE(s2mps14_devs);
+ break;
+ case S2MPS15X:
+ sec_devs = s2mps15_devs;
+ num_sec_devs = ARRAY_SIZE(s2mps15_devs);
+ break;
+ case S2MPU02:
+ sec_devs = s2mpu02_devs;
+ num_sec_devs = ARRAY_SIZE(s2mpu02_devs);
+ break;
+ case S2MPU05:
+ sec_devs = s2mpu05_devs;
+ num_sec_devs = ARRAY_SIZE(s2mpu05_devs);
+ break;
+ default:
+ return dev_err_probe(sec_pmic->dev, -EINVAL,
+ "Unsupported device type %d\n",
+ sec_pmic->device_type);
+ }
+ ret = devm_mfd_add_devices(sec_pmic->dev, -1, sec_devs, num_sec_devs,
+ NULL, 0, NULL);
+ if (ret)
+ return ret;
+
+ sec_pmic_configure(sec_pmic);
+ sec_pmic_dump_rev(sec_pmic);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sec_pmic_probe);
+
+void sec_pmic_shutdown(struct device *dev)
+{
+ struct sec_pmic_dev *sec_pmic = dev_get_drvdata(dev);
+ unsigned int reg, mask;
+
+ if (!sec_pmic->pdata->manual_poweroff)
+ return;
+
+ switch (sec_pmic->device_type) {
+ case S2MPS11X:
+ reg = S2MPS11_REG_CTRL1;
+ mask = S2MPS11_CTRL1_PWRHOLD_MASK;
+ break;
+ default:
+ /*
+ * Currently only one board with S2MPS11 needs this, so just
+ * ignore the rest.
+ */
+ dev_warn(sec_pmic->dev,
+ "Unsupported device %d for manual power off\n",
+ sec_pmic->device_type);
+ return;
+ }
+
+ regmap_update_bits(sec_pmic->regmap_pmic, reg, mask, 0);
+}
+EXPORT_SYMBOL_GPL(sec_pmic_shutdown);
+
+static int sec_pmic_suspend(struct device *dev)
+{
+ struct sec_pmic_dev *sec_pmic = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(sec_pmic->irq);
+ /*
+ * PMIC IRQ must be disabled during suspend for RTC alarm
+ * to work properly.
+ * When device is woken up from suspend, an
+ * interrupt occurs before resuming I2C bus controller.
+ * The interrupt is handled by regmap_irq_thread which tries
+ * to read RTC registers. This read fails (I2C is still
+ * suspended) and RTC Alarm interrupt is disabled.
+ */
+ disable_irq(sec_pmic->irq);
+
+ return 0;
+}
+
+static int sec_pmic_resume(struct device *dev)
+{
+ struct sec_pmic_dev *sec_pmic = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(sec_pmic->irq);
+ enable_irq(sec_pmic->irq);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_DEV_PM_OPS(sec_pmic_pm_ops, sec_pmic_suspend, sec_pmic_resume);
+EXPORT_SYMBOL_GPL(sec_pmic_pm_ops);
+
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
+MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
+MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
+MODULE_AUTHOR("André Draszik <andre.draszik@linaro.org>");
+MODULE_DESCRIPTION("Core driver for the Samsung S5M");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
deleted file mode 100644
index 3e9b65c988a7..000000000000
--- a/drivers/mfd/sec-core.c
+++ /dev/null
@@ -1,481 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-//
-// Copyright (c) 2012 Samsung Electronics Co., Ltd
-// http://www.samsung.com
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
-#include <linux/of.h>
-#include <linux/interrupt.h>
-#include <linux/pm_runtime.h>
-#include <linux/mutex.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/samsung/core.h>
-#include <linux/mfd/samsung/irq.h>
-#include <linux/mfd/samsung/s2mpa01.h>
-#include <linux/mfd/samsung/s2mps11.h>
-#include <linux/mfd/samsung/s2mps13.h>
-#include <linux/mfd/samsung/s2mps14.h>
-#include <linux/mfd/samsung/s2mps15.h>
-#include <linux/mfd/samsung/s2mpu02.h>
-#include <linux/mfd/samsung/s5m8767.h>
-#include <linux/regmap.h>
-
-static const struct mfd_cell s5m8767_devs[] = {
- { .name = "s5m8767-pmic", },
- { .name = "s5m-rtc", },
- {
- .name = "s5m8767-clk",
- .of_compatible = "samsung,s5m8767-clk",
- },
-};
-
-static const struct mfd_cell s2dos05_devs[] = {
- { .name = "s2dos05-regulator", },
-};
-
-static const struct mfd_cell s2mps11_devs[] = {
- { .name = "s2mps11-regulator", },
- { .name = "s2mps14-rtc", },
- {
- .name = "s2mps11-clk",
- .of_compatible = "samsung,s2mps11-clk",
- },
-};
-
-static const struct mfd_cell s2mps13_devs[] = {
- { .name = "s2mps13-regulator", },
- { .name = "s2mps13-rtc", },
- {
- .name = "s2mps13-clk",
- .of_compatible = "samsung,s2mps13-clk",
- },
-};
-
-static const struct mfd_cell s2mps14_devs[] = {
- { .name = "s2mps14-regulator", },
- { .name = "s2mps14-rtc", },
- {
- .name = "s2mps14-clk",
- .of_compatible = "samsung,s2mps14-clk",
- },
-};
-
-static const struct mfd_cell s2mps15_devs[] = {
- { .name = "s2mps15-regulator", },
- { .name = "s2mps15-rtc", },
- {
- .name = "s2mps13-clk",
- .of_compatible = "samsung,s2mps13-clk",
- },
-};
-
-static const struct mfd_cell s2mpa01_devs[] = {
- { .name = "s2mpa01-pmic", },
- { .name = "s2mps14-rtc", },
-};
-
-static const struct mfd_cell s2mpu02_devs[] = {
- { .name = "s2mpu02-regulator", },
-};
-
-static const struct mfd_cell s2mpu05_devs[] = {
- { .name = "s2mpu05-regulator", },
- { .name = "s2mps15-rtc", },
-};
-
-static const struct of_device_id sec_dt_match[] = {
- {
- .compatible = "samsung,s5m8767-pmic",
- .data = (void *)S5M8767X,
- }, {
- .compatible = "samsung,s2dos05",
- .data = (void *)S2DOS05,
- }, {
- .compatible = "samsung,s2mps11-pmic",
- .data = (void *)S2MPS11X,
- }, {
- .compatible = "samsung,s2mps13-pmic",
- .data = (void *)S2MPS13X,
- }, {
- .compatible = "samsung,s2mps14-pmic",
- .data = (void *)S2MPS14X,
- }, {
- .compatible = "samsung,s2mps15-pmic",
- .data = (void *)S2MPS15X,
- }, {
- .compatible = "samsung,s2mpa01-pmic",
- .data = (void *)S2MPA01,
- }, {
- .compatible = "samsung,s2mpu02-pmic",
- .data = (void *)S2MPU02,
- }, {
- .compatible = "samsung,s2mpu05-pmic",
- .data = (void *)S2MPU05,
- }, {
- /* Sentinel */
- },
-};
-MODULE_DEVICE_TABLE(of, sec_dt_match);
-
-static bool s2mpa01_volatile(struct device *dev, unsigned int reg)
-{
- switch (reg) {
- case S2MPA01_REG_INT1M:
- case S2MPA01_REG_INT2M:
- case S2MPA01_REG_INT3M:
- return false;
- default:
- return true;
- }
-}
-
-static bool s2mps11_volatile(struct device *dev, unsigned int reg)
-{
- switch (reg) {
- case S2MPS11_REG_INT1M:
- case S2MPS11_REG_INT2M:
- case S2MPS11_REG_INT3M:
- return false;
- default:
- return true;
- }
-}
-
-static bool s2mpu02_volatile(struct device *dev, unsigned int reg)
-{
- switch (reg) {
- case S2MPU02_REG_INT1M:
- case S2MPU02_REG_INT2M:
- case S2MPU02_REG_INT3M:
- return false;
- default:
- return true;
- }
-}
-
-static const struct regmap_config sec_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-};
-
-static const struct regmap_config s2mpa01_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-
- .max_register = S2MPA01_REG_LDO_OVCB4,
- .volatile_reg = s2mpa01_volatile,
- .cache_type = REGCACHE_FLAT,
-};
-
-static const struct regmap_config s2mps11_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-
- .max_register = S2MPS11_REG_L38CTRL,
- .volatile_reg = s2mps11_volatile,
- .cache_type = REGCACHE_FLAT,
-};
-
-static const struct regmap_config s2mps13_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-
- .max_register = S2MPS13_REG_LDODSCH5,
- .volatile_reg = s2mps11_volatile,
- .cache_type = REGCACHE_FLAT,
-};
-
-static const struct regmap_config s2mps14_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-
- .max_register = S2MPS14_REG_LDODSCH3,
- .volatile_reg = s2mps11_volatile,
- .cache_type = REGCACHE_FLAT,
-};
-
-static const struct regmap_config s2mps15_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-
- .max_register = S2MPS15_REG_LDODSCH4,
- .volatile_reg = s2mps11_volatile,
- .cache_type = REGCACHE_FLAT,
-};
-
-static const struct regmap_config s2mpu02_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-
- .max_register = S2MPU02_REG_DVSDATA,
- .volatile_reg = s2mpu02_volatile,
- .cache_type = REGCACHE_FLAT,
-};
-
-static const struct regmap_config s5m8767_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-
- .max_register = S5M8767_REG_LDO28CTRL,
- .volatile_reg = s2mps11_volatile,
- .cache_type = REGCACHE_FLAT,
-};
-
-static void sec_pmic_dump_rev(struct sec_pmic_dev *sec_pmic)
-{
- unsigned int val;
-
- /* For each device type, the REG_ID is always the first register */
- if (!regmap_read(sec_pmic->regmap_pmic, S2MPS11_REG_ID, &val))
- dev_dbg(sec_pmic->dev, "Revision: 0x%x\n", val);
-}
-
-static void sec_pmic_configure(struct sec_pmic_dev *sec_pmic)
-{
- int err;
-
- if (sec_pmic->device_type != S2MPS13X)
- return;
-
- if (sec_pmic->pdata->disable_wrstbi) {
- /*
- * If WRSTBI pin is pulled down this feature must be disabled
- * because each Suspend to RAM will trigger buck voltage reset
- * to default values.
- */
- err = regmap_update_bits(sec_pmic->regmap_pmic,
- S2MPS13_REG_WRSTBI,
- S2MPS13_REG_WRSTBI_MASK, 0x0);
- if (err)
- dev_warn(sec_pmic->dev,
- "Cannot initialize WRSTBI config: %d\n",
- err);
- }
-}
-
-/*
- * Only the common platform data elements for s5m8767 are parsed here from the
- * device tree. Other sub-modules of s5m8767 such as pmic, rtc , charger and
- * others have to parse their own platform data elements from device tree.
- *
- * The s5m8767 platform data structure is instantiated here and the drivers for
- * the sub-modules need not instantiate another instance while parsing their
- * platform data.
- */
-static struct sec_platform_data *
-sec_pmic_i2c_parse_dt_pdata(struct device *dev)
-{
- struct sec_platform_data *pd;
-
- pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
- if (!pd)
- return ERR_PTR(-ENOMEM);
-
- pd->manual_poweroff = of_property_read_bool(dev->of_node,
- "samsung,s2mps11-acokb-ground");
- pd->disable_wrstbi = of_property_read_bool(dev->of_node,
- "samsung,s2mps11-wrstbi-ground");
- return pd;
-}
-
-static int sec_pmic_probe(struct i2c_client *i2c)
-{
- const struct regmap_config *regmap;
- struct sec_platform_data *pdata;
- const struct mfd_cell *sec_devs;
- struct sec_pmic_dev *sec_pmic;
- int ret, num_sec_devs;
-
- sec_pmic = devm_kzalloc(&i2c->dev, sizeof(struct sec_pmic_dev),
- GFP_KERNEL);
- if (sec_pmic == NULL)
- return -ENOMEM;
-
- i2c_set_clientdata(i2c, sec_pmic);
- sec_pmic->dev = &i2c->dev;
- sec_pmic->i2c = i2c;
- sec_pmic->irq = i2c->irq;
-
- pdata = sec_pmic_i2c_parse_dt_pdata(sec_pmic->dev);
- if (IS_ERR(pdata)) {
- ret = PTR_ERR(pdata);
- return ret;
- }
-
- sec_pmic->device_type = (unsigned long)of_device_get_match_data(sec_pmic->dev);
- sec_pmic->pdata = pdata;
-
- switch (sec_pmic->device_type) {
- case S2MPA01:
- regmap = &s2mpa01_regmap_config;
- break;
- case S2MPS11X:
- regmap = &s2mps11_regmap_config;
- break;
- case S2MPS13X:
- regmap = &s2mps13_regmap_config;
- break;
- case S2MPS14X:
- regmap = &s2mps14_regmap_config;
- break;
- case S2MPS15X:
- regmap = &s2mps15_regmap_config;
- break;
- case S5M8767X:
- regmap = &s5m8767_regmap_config;
- break;
- case S2MPU02:
- regmap = &s2mpu02_regmap_config;
- break;
- default:
- regmap = &sec_regmap_config;
- break;
- }
-
- sec_pmic->regmap_pmic = devm_regmap_init_i2c(i2c, regmap);
- if (IS_ERR(sec_pmic->regmap_pmic)) {
- ret = PTR_ERR(sec_pmic->regmap_pmic);
- dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
- ret);
- return ret;
- }
-
- sec_irq_init(sec_pmic);
-
- pm_runtime_set_active(sec_pmic->dev);
-
- switch (sec_pmic->device_type) {
- case S5M8767X:
- sec_devs = s5m8767_devs;
- num_sec_devs = ARRAY_SIZE(s5m8767_devs);
- break;
- case S2DOS05:
- sec_devs = s2dos05_devs;
- num_sec_devs = ARRAY_SIZE(s2dos05_devs);
- break;
- case S2MPA01:
- sec_devs = s2mpa01_devs;
- num_sec_devs = ARRAY_SIZE(s2mpa01_devs);
- break;
- case S2MPS11X:
- sec_devs = s2mps11_devs;
- num_sec_devs = ARRAY_SIZE(s2mps11_devs);
- break;
- case S2MPS13X:
- sec_devs = s2mps13_devs;
- num_sec_devs = ARRAY_SIZE(s2mps13_devs);
- break;
- case S2MPS14X:
- sec_devs = s2mps14_devs;
- num_sec_devs = ARRAY_SIZE(s2mps14_devs);
- break;
- case S2MPS15X:
- sec_devs = s2mps15_devs;
- num_sec_devs = ARRAY_SIZE(s2mps15_devs);
- break;
- case S2MPU02:
- sec_devs = s2mpu02_devs;
- num_sec_devs = ARRAY_SIZE(s2mpu02_devs);
- break;
- case S2MPU05:
- sec_devs = s2mpu05_devs;
- num_sec_devs = ARRAY_SIZE(s2mpu05_devs);
- break;
- default:
- dev_err(&i2c->dev, "Unsupported device type (%lu)\n",
- sec_pmic->device_type);
- return -ENODEV;
- }
- ret = devm_mfd_add_devices(sec_pmic->dev, -1, sec_devs, num_sec_devs,
- NULL, 0, NULL);
- if (ret)
- return ret;
-
- sec_pmic_configure(sec_pmic);
- sec_pmic_dump_rev(sec_pmic);
-
- return ret;
-}
-
-static void sec_pmic_shutdown(struct i2c_client *i2c)
-{
- struct sec_pmic_dev *sec_pmic = i2c_get_clientdata(i2c);
- unsigned int reg, mask;
-
- if (!sec_pmic->pdata->manual_poweroff)
- return;
-
- switch (sec_pmic->device_type) {
- case S2MPS11X:
- reg = S2MPS11_REG_CTRL1;
- mask = S2MPS11_CTRL1_PWRHOLD_MASK;
- break;
- default:
- /*
- * Currently only one board with S2MPS11 needs this, so just
- * ignore the rest.
- */
- dev_warn(sec_pmic->dev,
- "Unsupported device %lu for manual power off\n",
- sec_pmic->device_type);
- return;
- }
-
- regmap_update_bits(sec_pmic->regmap_pmic, reg, mask, 0);
-}
-
-static int sec_pmic_suspend(struct device *dev)
-{
- struct i2c_client *i2c = to_i2c_client(dev);
- struct sec_pmic_dev *sec_pmic = i2c_get_clientdata(i2c);
-
- if (device_may_wakeup(dev))
- enable_irq_wake(sec_pmic->irq);
- /*
- * PMIC IRQ must be disabled during suspend for RTC alarm
- * to work properly.
- * When device is woken up from suspend, an
- * interrupt occurs before resuming I2C bus controller.
- * The interrupt is handled by regmap_irq_thread which tries
- * to read RTC registers. This read fails (I2C is still
- * suspended) and RTC Alarm interrupt is disabled.
- */
- disable_irq(sec_pmic->irq);
-
- return 0;
-}
-
-static int sec_pmic_resume(struct device *dev)
-{
- struct i2c_client *i2c = to_i2c_client(dev);
- struct sec_pmic_dev *sec_pmic = i2c_get_clientdata(i2c);
-
- if (device_may_wakeup(dev))
- disable_irq_wake(sec_pmic->irq);
- enable_irq(sec_pmic->irq);
-
- return 0;
-}
-
-static DEFINE_SIMPLE_DEV_PM_OPS(sec_pmic_pm_ops,
- sec_pmic_suspend, sec_pmic_resume);
-
-static struct i2c_driver sec_pmic_driver = {
- .driver = {
- .name = "sec_pmic",
- .pm = pm_sleep_ptr(&sec_pmic_pm_ops),
- .of_match_table = sec_dt_match,
- },
- .probe = sec_pmic_probe,
- .shutdown = sec_pmic_shutdown,
-};
-module_i2c_driver(sec_pmic_driver);
-
-MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
-MODULE_DESCRIPTION("Core support for the S5M MFD");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/sec-core.h b/drivers/mfd/sec-core.h
new file mode 100644
index 000000000000..92c7558ab8b0
--- /dev/null
+++ b/drivers/mfd/sec-core.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2012 Samsung Electronics Co., Ltd
+ * http://www.samsung.com
+ * Copyright 2025 Linaro Ltd.
+ *
+ * Samsung SxM core driver internal data
+ */
+
+#ifndef __SEC_CORE_INT_H
+#define __SEC_CORE_INT_H
+
+struct device;
+struct i2c_client;
+struct regmap;
+struct sec_pmic_dev;
+
+extern const struct dev_pm_ops sec_pmic_pm_ops;
+
+int sec_pmic_probe(struct device *dev, int device_type, unsigned int irq,
+ struct regmap *regmap, struct i2c_client *client);
+void sec_pmic_shutdown(struct device *dev);
+
+int sec_irq_init(struct sec_pmic_dev *sec_pmic);
+
+#endif /* __SEC_CORE_INT_H */
diff --git a/drivers/mfd/sec-i2c.c b/drivers/mfd/sec-i2c.c
new file mode 100644
index 000000000000..3132b849b4bc
--- /dev/null
+++ b/drivers/mfd/sec-i2c.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2012 Samsung Electronics Co., Ltd
+ * http://www.samsung.com
+ * Copyright 2025 Linaro Ltd.
+ *
+ * Samsung SxM I2C driver
+ */
+
+#include <linux/dev_printk.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/mfd/samsung/core.h>
+#include <linux/mfd/samsung/s2mpa01.h>
+#include <linux/mfd/samsung/s2mps11.h>
+#include <linux/mfd/samsung/s2mps13.h>
+#include <linux/mfd/samsung/s2mps14.h>
+#include <linux/mfd/samsung/s2mps15.h>
+#include <linux/mfd/samsung/s2mpu02.h>
+#include <linux/mfd/samsung/s5m8767.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include "sec-core.h"
+
+struct sec_pmic_i2c_platform_data {
+ const struct regmap_config *regmap_cfg;
+ int device_type;
+};
+
+static bool s2mpa01_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case S2MPA01_REG_INT1M:
+ case S2MPA01_REG_INT2M:
+ case S2MPA01_REG_INT3M:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static bool s2mps11_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case S2MPS11_REG_INT1M:
+ case S2MPS11_REG_INT2M:
+ case S2MPS11_REG_INT3M:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static bool s2mpu02_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case S2MPU02_REG_INT1M:
+ case S2MPU02_REG_INT2M:
+ case S2MPU02_REG_INT3M:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static const struct regmap_config s2dos05_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static const struct regmap_config s2mpa01_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = S2MPA01_REG_LDO_OVCB4,
+ .volatile_reg = s2mpa01_volatile,
+ .cache_type = REGCACHE_FLAT,
+};
+
+static const struct regmap_config s2mps11_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = S2MPS11_REG_L38CTRL,
+ .volatile_reg = s2mps11_volatile,
+ .cache_type = REGCACHE_FLAT,
+};
+
+static const struct regmap_config s2mps13_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = S2MPS13_REG_LDODSCH5,
+ .volatile_reg = s2mps11_volatile,
+ .cache_type = REGCACHE_FLAT,
+};
+
+static const struct regmap_config s2mps14_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = S2MPS14_REG_LDODSCH3,
+ .volatile_reg = s2mps11_volatile,
+ .cache_type = REGCACHE_FLAT,
+};
+
+static const struct regmap_config s2mps15_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = S2MPS15_REG_LDODSCH4,
+ .volatile_reg = s2mps11_volatile,
+ .cache_type = REGCACHE_FLAT,
+};
+
+static const struct regmap_config s2mpu02_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = S2MPU02_REG_DVSDATA,
+ .volatile_reg = s2mpu02_volatile,
+ .cache_type = REGCACHE_FLAT,
+};
+
+static const struct regmap_config s2mpu05_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static const struct regmap_config s5m8767_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = S5M8767_REG_LDO28CTRL,
+ .volatile_reg = s2mps11_volatile,
+ .cache_type = REGCACHE_FLAT,
+};
+
+static int sec_pmic_i2c_probe(struct i2c_client *client)
+{
+ const struct sec_pmic_i2c_platform_data *pdata;
+ struct regmap *regmap_pmic;
+
+ pdata = device_get_match_data(&client->dev);
+ if (!pdata)
+ return dev_err_probe(&client->dev, -ENODEV,
+ "Unsupported device type\n");
+
+ regmap_pmic = devm_regmap_init_i2c(client, pdata->regmap_cfg);
+ if (IS_ERR(regmap_pmic))
+ return dev_err_probe(&client->dev, PTR_ERR(regmap_pmic),
+ "regmap init failed\n");
+
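+ /* The remaining setup is transport-agnostic and shared with sec-common.c */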
+ return sec_pmic_probe(&client->dev, pdata->device_type, client->irq,
+ regmap_pmic, client);
+}
+
+static void sec_pmic_i2c_shutdown(struct i2c_client *i2c)
+{
+ sec_pmic_shutdown(&i2c->dev);
+}
+
+static const struct sec_pmic_i2c_platform_data s2dos05_data = {
+ .regmap_cfg = &s2dos05_regmap_config,
+ .device_type = S2DOS05
+};
+
+static const struct sec_pmic_i2c_platform_data s2mpa01_data = {
+ .regmap_cfg = &s2mpa01_regmap_config,
+ .device_type = S2MPA01,
+};
+
+static const struct sec_pmic_i2c_platform_data s2mps11_data = {
+ .regmap_cfg = &s2mps11_regmap_config,
+ .device_type = S2MPS11X,
+};
+
+static const struct sec_pmic_i2c_platform_data s2mps13_data = {
+ .regmap_cfg = &s2mps13_regmap_config,
+ .device_type = S2MPS13X,
+};
+
+static const struct sec_pmic_i2c_platform_data s2mps14_data = {
+ .regmap_cfg = &s2mps14_regmap_config,
+ .device_type = S2MPS14X,
+};
+
+static const struct sec_pmic_i2c_platform_data s2mps15_data = {
+ .regmap_cfg = &s2mps15_regmap_config,
+ .device_type = S2MPS15X,
+};
+
+static const struct sec_pmic_i2c_platform_data s2mpu02_data = {
+ .regmap_cfg = &s2mpu02_regmap_config,
+ .device_type = S2MPU02,
+};
+
+static const struct sec_pmic_i2c_platform_data s2mpu05_data = {
+ .regmap_cfg = &s2mpu05_regmap_config,
+ .device_type = S2MPU05,
+};
+
+static const struct sec_pmic_i2c_platform_data s5m8767_data = {
+ .regmap_cfg = &s5m8767_regmap_config,
+ .device_type = S5M8767X,
+};
+
+static const struct of_device_id sec_pmic_i2c_of_match[] = {
+ { .compatible = "samsung,s2dos05", .data = &s2dos05_data, },
+ { .compatible = "samsung,s2mpa01-pmic", .data = &s2mpa01_data, },
+ { .compatible = "samsung,s2mps11-pmic", .data = &s2mps11_data, },
+ { .compatible = "samsung,s2mps13-pmic", .data = &s2mps13_data, },
+ { .compatible = "samsung,s2mps14-pmic", .data = &s2mps14_data, },
+ { .compatible = "samsung,s2mps15-pmic", .data = &s2mps15_data, },
+ { .compatible = "samsung,s2mpu02-pmic", .data = &s2mpu02_data, },
+ { .compatible = "samsung,s2mpu05-pmic", .data = &s2mpu05_data, },
+ { .compatible = "samsung,s5m8767-pmic", .data = &s5m8767_data, },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sec_pmic_i2c_of_match);
+
+static struct i2c_driver sec_pmic_i2c_driver = {
+ .driver = {
+ .name = "sec-pmic-i2c",
+ .pm = pm_sleep_ptr(&sec_pmic_pm_ops),
+ .of_match_table = sec_pmic_i2c_of_match,
+ },
+ .probe = sec_pmic_i2c_probe,
+ .shutdown = sec_pmic_i2c_shutdown,
+};
+module_i2c_driver(sec_pmic_i2c_driver);
+
+MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
+MODULE_AUTHOR("André Draszik <andre.draszik@linaro.org>");
+MODULE_DESCRIPTION("I2C driver for the Samsung S5M");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/sec-irq.c b/drivers/mfd/sec-irq.c
index 047fc065fcf1..c5c80b1ba104 100644
--- a/drivers/mfd/sec-irq.c
+++ b/drivers/mfd/sec-irq.c
@@ -3,227 +3,139 @@
// Copyright (c) 2011-2014 Samsung Electronics Co., Ltd
// http://www.samsung.com
-#include <linux/device.h>
+#include <linux/array_size.h>
+#include <linux/build_bug.h>
+#include <linux/dev_printk.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/module.h>
-#include <linux/regmap.h>
-
#include <linux/mfd/samsung/core.h>
#include <linux/mfd/samsung/irq.h>
+#include <linux/mfd/samsung/s2mpg10.h>
#include <linux/mfd/samsung/s2mps11.h>
#include <linux/mfd/samsung/s2mps14.h>
#include <linux/mfd/samsung/s2mpu02.h>
#include <linux/mfd/samsung/s2mpu05.h>
#include <linux/mfd/samsung/s5m8767.h>
+#include <linux/regmap.h>
+#include "sec-core.h"
+
+static const struct regmap_irq s2mpg10_irqs[] = {
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PWRONF, 0, S2MPG10_IRQ_PWRONF_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PWRONR, 0, S2MPG10_IRQ_PWRONR_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_JIGONBF, 0, S2MPG10_IRQ_JIGONBF_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_JIGONBR, 0, S2MPG10_IRQ_JIGONBR_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_ACOKBF, 0, S2MPG10_IRQ_ACOKBF_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_ACOKBR, 0, S2MPG10_IRQ_ACOKBR_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PWRON1S, 0, S2MPG10_IRQ_PWRON1S_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_MRB, 0, S2MPG10_IRQ_MRB_MASK),
+
+ REGMAP_IRQ_REG(S2MPG10_IRQ_RTC60S, 1, S2MPG10_IRQ_RTC60S_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_RTCA1, 1, S2MPG10_IRQ_RTCA1_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_RTCA0, 1, S2MPG10_IRQ_RTCA0_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_RTC1S, 1, S2MPG10_IRQ_RTC1S_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_WTSR_COLDRST, 1, S2MPG10_IRQ_WTSR_COLDRST_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_WTSR, 1, S2MPG10_IRQ_WTSR_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_WRST, 1, S2MPG10_IRQ_WRST_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_SMPL, 1, S2MPG10_IRQ_SMPL_MASK),
+
+ REGMAP_IRQ_REG(S2MPG10_IRQ_120C, 2, S2MPG10_IRQ_INT120C_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_140C, 2, S2MPG10_IRQ_INT140C_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_TSD, 2, S2MPG10_IRQ_TSD_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PIF_TIMEOUT1, 2, S2MPG10_IRQ_PIF_TIMEOUT1_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PIF_TIMEOUT2, 2, S2MPG10_IRQ_PIF_TIMEOUT2_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_SPD_PARITY_ERR, 2, S2MPG10_IRQ_SPD_PARITY_ERR_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_SPD_ABNORMAL_STOP, 2, S2MPG10_IRQ_SPD_ABNORMAL_STOP_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PMETER_OVERF, 2, S2MPG10_IRQ_PMETER_OVERF_MASK),
+
+ REGMAP_IRQ_REG(S2MPG10_IRQ_OCP_B1M, 3, S2MPG10_IRQ_OCP_B1M_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_OCP_B2M, 3, S2MPG10_IRQ_OCP_B2M_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_OCP_B3M, 3, S2MPG10_IRQ_OCP_B3M_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_OCP_B4M, 3, S2MPG10_IRQ_OCP_B4M_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_OCP_B5M, 3, S2MPG10_IRQ_OCP_B5M_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_OCP_B6M, 3, S2MPG10_IRQ_OCP_B6M_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_OCP_B7M, 3, S2MPG10_IRQ_OCP_B7M_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_OCP_B8M, 3, S2MPG10_IRQ_OCP_B8M_MASK),
+
+ REGMAP_IRQ_REG(S2MPG10_IRQ_OCP_B9M, 4, S2MPG10_IRQ_OCP_B9M_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_OCP_B10M, 4, S2MPG10_IRQ_OCP_B10M_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_WLWP_ACC, 4, S2MPG10_IRQ_WLWP_ACC_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_SMPL_TIMEOUT, 4, S2MPG10_IRQ_SMPL_TIMEOUT_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_WTSR_TIMEOUT, 4, S2MPG10_IRQ_WTSR_TIMEOUT_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_SPD_SRP_PKT_RST, 4, S2MPG10_IRQ_SPD_SRP_PKT_RST_MASK),
+
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PWR_WARN_CH0, 5, S2MPG10_IRQ_PWR_WARN_CH0_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PWR_WARN_CH1, 5, S2MPG10_IRQ_PWR_WARN_CH1_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PWR_WARN_CH2, 5, S2MPG10_IRQ_PWR_WARN_CH2_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PWR_WARN_CH3, 5, S2MPG10_IRQ_PWR_WARN_CH3_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PWR_WARN_CH4, 5, S2MPG10_IRQ_PWR_WARN_CH4_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PWR_WARN_CH5, 5, S2MPG10_IRQ_PWR_WARN_CH5_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PWR_WARN_CH6, 5, S2MPG10_IRQ_PWR_WARN_CH6_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PWR_WARN_CH7, 5, S2MPG10_IRQ_PWR_WARN_CH7_MASK),
+};
static const struct regmap_irq s2mps11_irqs[] = {
- [S2MPS11_IRQ_PWRONF] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_PWRONF_MASK,
- },
- [S2MPS11_IRQ_PWRONR] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_PWRONR_MASK,
- },
- [S2MPS11_IRQ_JIGONBF] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_JIGONBF_MASK,
- },
- [S2MPS11_IRQ_JIGONBR] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_JIGONBR_MASK,
- },
- [S2MPS11_IRQ_ACOKBF] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_ACOKBF_MASK,
- },
- [S2MPS11_IRQ_ACOKBR] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_ACOKBR_MASK,
- },
- [S2MPS11_IRQ_PWRON1S] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_PWRON1S_MASK,
- },
- [S2MPS11_IRQ_MRB] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_MRB_MASK,
- },
- [S2MPS11_IRQ_RTC60S] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTC60S_MASK,
- },
- [S2MPS11_IRQ_RTCA1] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTCA1_MASK,
- },
- [S2MPS11_IRQ_RTCA0] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTCA0_MASK,
- },
- [S2MPS11_IRQ_SMPL] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_SMPL_MASK,
- },
- [S2MPS11_IRQ_RTC1S] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTC1S_MASK,
- },
- [S2MPS11_IRQ_WTSR] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_WTSR_MASK,
- },
- [S2MPS11_IRQ_INT120C] = {
- .reg_offset = 2,
- .mask = S2MPS11_IRQ_INT120C_MASK,
- },
- [S2MPS11_IRQ_INT140C] = {
- .reg_offset = 2,
- .mask = S2MPS11_IRQ_INT140C_MASK,
- },
+ REGMAP_IRQ_REG(S2MPS11_IRQ_PWRONF, 0, S2MPS11_IRQ_PWRONF_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_PWRONR, 0, S2MPS11_IRQ_PWRONR_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_JIGONBF, 0, S2MPS11_IRQ_JIGONBF_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_JIGONBR, 0, S2MPS11_IRQ_JIGONBR_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_ACOKBF, 0, S2MPS11_IRQ_ACOKBF_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_ACOKBR, 0, S2MPS11_IRQ_ACOKBR_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_PWRON1S, 0, S2MPS11_IRQ_PWRON1S_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_MRB, 0, S2MPS11_IRQ_MRB_MASK),
+
+ REGMAP_IRQ_REG(S2MPS11_IRQ_RTC60S, 1, S2MPS11_IRQ_RTC60S_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_RTCA1, 1, S2MPS11_IRQ_RTCA1_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_RTCA0, 1, S2MPS11_IRQ_RTCA0_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_SMPL, 1, S2MPS11_IRQ_SMPL_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_RTC1S, 1, S2MPS11_IRQ_RTC1S_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_WTSR, 1, S2MPS11_IRQ_WTSR_MASK),
+
+ REGMAP_IRQ_REG(S2MPS11_IRQ_INT120C, 2, S2MPS11_IRQ_INT120C_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_INT140C, 2, S2MPS11_IRQ_INT140C_MASK),
};
static const struct regmap_irq s2mps14_irqs[] = {
- [S2MPS14_IRQ_PWRONF] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_PWRONF_MASK,
- },
- [S2MPS14_IRQ_PWRONR] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_PWRONR_MASK,
- },
- [S2MPS14_IRQ_JIGONBF] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_JIGONBF_MASK,
- },
- [S2MPS14_IRQ_JIGONBR] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_JIGONBR_MASK,
- },
- [S2MPS14_IRQ_ACOKBF] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_ACOKBF_MASK,
- },
- [S2MPS14_IRQ_ACOKBR] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_ACOKBR_MASK,
- },
- [S2MPS14_IRQ_PWRON1S] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_PWRON1S_MASK,
- },
- [S2MPS14_IRQ_MRB] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_MRB_MASK,
- },
- [S2MPS14_IRQ_RTC60S] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTC60S_MASK,
- },
- [S2MPS14_IRQ_RTCA1] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTCA1_MASK,
- },
- [S2MPS14_IRQ_RTCA0] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTCA0_MASK,
- },
- [S2MPS14_IRQ_SMPL] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_SMPL_MASK,
- },
- [S2MPS14_IRQ_RTC1S] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTC1S_MASK,
- },
- [S2MPS14_IRQ_WTSR] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_WTSR_MASK,
- },
- [S2MPS14_IRQ_INT120C] = {
- .reg_offset = 2,
- .mask = S2MPS11_IRQ_INT120C_MASK,
- },
- [S2MPS14_IRQ_INT140C] = {
- .reg_offset = 2,
- .mask = S2MPS11_IRQ_INT140C_MASK,
- },
- [S2MPS14_IRQ_TSD] = {
- .reg_offset = 2,
- .mask = S2MPS14_IRQ_TSD_MASK,
- },
+ REGMAP_IRQ_REG(S2MPS14_IRQ_PWRONF, 0, S2MPS11_IRQ_PWRONF_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_PWRONR, 0, S2MPS11_IRQ_PWRONR_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_JIGONBF, 0, S2MPS11_IRQ_JIGONBF_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_JIGONBR, 0, S2MPS11_IRQ_JIGONBR_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_ACOKBF, 0, S2MPS11_IRQ_ACOKBF_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_ACOKBR, 0, S2MPS11_IRQ_ACOKBR_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_PWRON1S, 0, S2MPS11_IRQ_PWRON1S_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_MRB, 0, S2MPS11_IRQ_MRB_MASK),
+
+ REGMAP_IRQ_REG(S2MPS14_IRQ_RTC60S, 1, S2MPS11_IRQ_RTC60S_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_RTCA1, 1, S2MPS11_IRQ_RTCA1_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_RTCA0, 1, S2MPS11_IRQ_RTCA0_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_SMPL, 1, S2MPS11_IRQ_SMPL_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_RTC1S, 1, S2MPS11_IRQ_RTC1S_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_WTSR, 1, S2MPS11_IRQ_WTSR_MASK),
+
+ REGMAP_IRQ_REG(S2MPS14_IRQ_INT120C, 2, S2MPS11_IRQ_INT120C_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_INT140C, 2, S2MPS11_IRQ_INT140C_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_TSD, 2, S2MPS14_IRQ_TSD_MASK),
};
static const struct regmap_irq s2mpu02_irqs[] = {
- [S2MPU02_IRQ_PWRONF] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_PWRONF_MASK,
- },
- [S2MPU02_IRQ_PWRONR] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_PWRONR_MASK,
- },
- [S2MPU02_IRQ_JIGONBF] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_JIGONBF_MASK,
- },
- [S2MPU02_IRQ_JIGONBR] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_JIGONBR_MASK,
- },
- [S2MPU02_IRQ_ACOKBF] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_ACOKBF_MASK,
- },
- [S2MPU02_IRQ_ACOKBR] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_ACOKBR_MASK,
- },
- [S2MPU02_IRQ_PWRON1S] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_PWRON1S_MASK,
- },
- [S2MPU02_IRQ_MRB] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_MRB_MASK,
- },
- [S2MPU02_IRQ_RTC60S] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTC60S_MASK,
- },
- [S2MPU02_IRQ_RTCA1] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTCA1_MASK,
- },
- [S2MPU02_IRQ_RTCA0] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTCA0_MASK,
- },
- [S2MPU02_IRQ_SMPL] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_SMPL_MASK,
- },
- [S2MPU02_IRQ_RTC1S] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTC1S_MASK,
- },
- [S2MPU02_IRQ_WTSR] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_WTSR_MASK,
- },
- [S2MPU02_IRQ_INT120C] = {
- .reg_offset = 2,
- .mask = S2MPS11_IRQ_INT120C_MASK,
- },
- [S2MPU02_IRQ_INT140C] = {
- .reg_offset = 2,
- .mask = S2MPS11_IRQ_INT140C_MASK,
- },
- [S2MPU02_IRQ_TSD] = {
- .reg_offset = 2,
- .mask = S2MPS14_IRQ_TSD_MASK,
- },
+ REGMAP_IRQ_REG(S2MPU02_IRQ_PWRONF, 0, S2MPS11_IRQ_PWRONF_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_PWRONR, 0, S2MPS11_IRQ_PWRONR_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_JIGONBF, 0, S2MPS11_IRQ_JIGONBF_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_JIGONBR, 0, S2MPS11_IRQ_JIGONBR_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_ACOKBF, 0, S2MPS11_IRQ_ACOKBF_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_ACOKBR, 0, S2MPS11_IRQ_ACOKBR_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_PWRON1S, 0, S2MPS11_IRQ_PWRON1S_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_MRB, 0, S2MPS11_IRQ_MRB_MASK),
+
+ REGMAP_IRQ_REG(S2MPU02_IRQ_RTC60S, 1, S2MPS11_IRQ_RTC60S_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_RTCA1, 1, S2MPS11_IRQ_RTCA1_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_RTCA0, 1, S2MPS11_IRQ_RTCA0_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_SMPL, 1, S2MPS11_IRQ_SMPL_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_RTC1S, 1, S2MPS11_IRQ_RTC1S_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_WTSR, 1, S2MPS11_IRQ_WTSR_MASK),
+
+ REGMAP_IRQ_REG(S2MPU02_IRQ_INT120C, 2, S2MPS11_IRQ_INT120C_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_INT140C, 2, S2MPS11_IRQ_INT140C_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_TSD, 2, S2MPS14_IRQ_TSD_MASK),
};
static const struct regmap_irq s2mpu05_irqs[] = {
@@ -247,74 +159,35 @@ static const struct regmap_irq s2mpu05_irqs[] = {
};
static const struct regmap_irq s5m8767_irqs[] = {
- [S5M8767_IRQ_PWRR] = {
- .reg_offset = 0,
- .mask = S5M8767_IRQ_PWRR_MASK,
- },
- [S5M8767_IRQ_PWRF] = {
- .reg_offset = 0,
- .mask = S5M8767_IRQ_PWRF_MASK,
- },
- [S5M8767_IRQ_PWR1S] = {
- .reg_offset = 0,
- .mask = S5M8767_IRQ_PWR1S_MASK,
- },
- [S5M8767_IRQ_JIGR] = {
- .reg_offset = 0,
- .mask = S5M8767_IRQ_JIGR_MASK,
- },
- [S5M8767_IRQ_JIGF] = {
- .reg_offset = 0,
- .mask = S5M8767_IRQ_JIGF_MASK,
- },
- [S5M8767_IRQ_LOWBAT2] = {
- .reg_offset = 0,
- .mask = S5M8767_IRQ_LOWBAT2_MASK,
- },
- [S5M8767_IRQ_LOWBAT1] = {
- .reg_offset = 0,
- .mask = S5M8767_IRQ_LOWBAT1_MASK,
- },
- [S5M8767_IRQ_MRB] = {
- .reg_offset = 1,
- .mask = S5M8767_IRQ_MRB_MASK,
- },
- [S5M8767_IRQ_DVSOK2] = {
- .reg_offset = 1,
- .mask = S5M8767_IRQ_DVSOK2_MASK,
- },
- [S5M8767_IRQ_DVSOK3] = {
- .reg_offset = 1,
- .mask = S5M8767_IRQ_DVSOK3_MASK,
- },
- [S5M8767_IRQ_DVSOK4] = {
- .reg_offset = 1,
- .mask = S5M8767_IRQ_DVSOK4_MASK,
- },
- [S5M8767_IRQ_RTC60S] = {
- .reg_offset = 2,
- .mask = S5M8767_IRQ_RTC60S_MASK,
- },
- [S5M8767_IRQ_RTCA1] = {
- .reg_offset = 2,
- .mask = S5M8767_IRQ_RTCA1_MASK,
- },
- [S5M8767_IRQ_RTCA2] = {
- .reg_offset = 2,
- .mask = S5M8767_IRQ_RTCA2_MASK,
- },
- [S5M8767_IRQ_SMPL] = {
- .reg_offset = 2,
- .mask = S5M8767_IRQ_SMPL_MASK,
- },
- [S5M8767_IRQ_RTC1S] = {
- .reg_offset = 2,
- .mask = S5M8767_IRQ_RTC1S_MASK,
- },
- [S5M8767_IRQ_WTSR] = {
- .reg_offset = 2,
- .mask = S5M8767_IRQ_WTSR_MASK,
- },
+ REGMAP_IRQ_REG(S5M8767_IRQ_PWRR, 0, S5M8767_IRQ_PWRR_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_PWRF, 0, S5M8767_IRQ_PWRF_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_PWR1S, 0, S5M8767_IRQ_PWR1S_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_JIGR, 0, S5M8767_IRQ_JIGR_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_JIGF, 0, S5M8767_IRQ_JIGF_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_LOWBAT2, 0, S5M8767_IRQ_LOWBAT2_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_LOWBAT1, 0, S5M8767_IRQ_LOWBAT1_MASK),
+
+ REGMAP_IRQ_REG(S5M8767_IRQ_MRB, 1, S5M8767_IRQ_MRB_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_DVSOK2, 1, S5M8767_IRQ_DVSOK2_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_DVSOK3, 1, S5M8767_IRQ_DVSOK3_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_DVSOK4, 1, S5M8767_IRQ_DVSOK4_MASK),
+
+ REGMAP_IRQ_REG(S5M8767_IRQ_RTC60S, 2, S5M8767_IRQ_RTC60S_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_RTCA1, 2, S5M8767_IRQ_RTCA1_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_RTCA2, 2, S5M8767_IRQ_RTCA2_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_SMPL, 2, S5M8767_IRQ_SMPL_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_RTC1S, 2, S5M8767_IRQ_RTC1S_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_WTSR, 2, S5M8767_IRQ_WTSR_MASK),
+};
+
+/* All S2MPG10 interrupt sources are read-only and don't require clearing */
+static const struct regmap_irq_chip s2mpg10_irq_chip = {
+ .name = "s2mpg10",
+ .irqs = s2mpg10_irqs,
+ .num_irqs = ARRAY_SIZE(s2mpg10_irqs),
+ .num_regs = 6,
+ .status_base = S2MPG10_PMIC_INT1,
+ .mask_base = S2MPG10_PMIC_INT1M,
};
static const struct regmap_irq_chip s2mps11_irq_chip = {
@@ -382,23 +255,21 @@ static const struct regmap_irq_chip s5m8767_irq_chip = {
int sec_irq_init(struct sec_pmic_dev *sec_pmic)
{
- int ret = 0;
- int type = sec_pmic->device_type;
const struct regmap_irq_chip *sec_irq_chip;
+ int ret;
- if (!sec_pmic->irq) {
- dev_warn(sec_pmic->dev,
- "No interrupt specified, no interrupts\n");
- return 0;
- }
-
- switch (type) {
+ switch (sec_pmic->device_type) {
case S5M8767X:
sec_irq_chip = &s5m8767_irq_chip;
break;
+ case S2DOS05:
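+  /* S2DOS05 exposes no interrupts; nothing to register */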
+ return 0;
case S2MPA01:
sec_irq_chip = &s2mps14_irq_chip;
break;
+ case S2MPG10:
+ sec_irq_chip = &s2mpg10_irq_chip;
+ break;
case S2MPS11X:
sec_irq_chip = &s2mps11_irq_chip;
break;
@@ -418,18 +289,24 @@ int sec_irq_init(struct sec_pmic_dev *sec_pmic)
sec_irq_chip = &s2mpu05_irq_chip;
break;
default:
- dev_err(sec_pmic->dev, "Unknown device type %lu\n",
- sec_pmic->device_type);
- return -EINVAL;
+ return dev_err_probe(sec_pmic->dev, -EINVAL,
+ "Unsupported device type %d\n",
+ sec_pmic->device_type);
+ }
+
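+ /* Only bail out for a missing IRQ once the device type has been validated */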
+ if (!sec_pmic->irq) {
+ dev_warn(sec_pmic->dev,
+ "No interrupt specified, no interrupts\n");
+ return 0;
}
ret = devm_regmap_add_irq_chip(sec_pmic->dev, sec_pmic->regmap_pmic,
sec_pmic->irq, IRQF_ONESHOT,
0, sec_irq_chip, &sec_pmic->irq_data);
- if (ret != 0) {
- dev_err(sec_pmic->dev, "Failed to register IRQ chip: %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(sec_pmic->dev, ret,
+ "Failed to add %s IRQ chip\n",
+ sec_irq_chip->name);
/*
* The rtc-s5m driver requests S2MPS14_IRQ_RTCA0 also for S2MPS11
@@ -439,10 +316,3 @@ int sec_irq_init(struct sec_pmic_dev *sec_pmic)
return 0;
}
-EXPORT_SYMBOL_GPL(sec_irq_init);
-
-MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
-MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
-MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
-MODULE_DESCRIPTION("Interrupt support for the S5M MFD");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index 7ee293b09f62..a5f9241fa3f2 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -631,49 +631,6 @@ unsigned long sm501_set_clock(struct device *dev,
EXPORT_SYMBOL_GPL(sm501_set_clock);
-/* sm501_find_clock
- *
- * finds the closest available frequency for a given clock
-*/
-
-unsigned long sm501_find_clock(struct device *dev,
- int clksrc,
- unsigned long req_freq)
-{
- struct sm501_devdata *sm = dev_get_drvdata(dev);
- unsigned long sm501_freq; /* the frequency achieveable by the 501 */
- struct sm501_clock to;
-
- switch (clksrc) {
- case SM501_CLOCK_P2XCLK:
- if (sm->rev >= 0xC0) {
- /* SM502 -> use the programmable PLL */
- sm501_freq = (sm501_calc_pll(2 * req_freq,
- &to, 5) / 2);
- } else {
- sm501_freq = (sm501_select_clock(2 * req_freq,
- &to, 5) / 2);
- }
- break;
-
- case SM501_CLOCK_V2XCLK:
- sm501_freq = (sm501_select_clock(2 * req_freq, &to, 3) / 2);
- break;
-
- case SM501_CLOCK_MCLK:
- case SM501_CLOCK_M1XCLK:
- sm501_freq = sm501_select_clock(req_freq, &to, 3);
- break;
-
- default:
- sm501_freq = 0; /* error */
- }
-
- return sm501_freq;
-}
-
-EXPORT_SYMBOL_GPL(sm501_find_clock);
-
static struct sm501_device *to_sm_device(struct platform_device *pdev)
{
return container_of(pdev, struct sm501_device, pdev);
@@ -915,7 +872,8 @@ static void sm501_gpio_ensure_gpio(struct sm501_gpio_chip *smchip,
}
}
-static void sm501_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int sm501_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct sm501_gpio_chip *smchip = gpiochip_get_data(chip);
@@ -939,6 +897,8 @@ static void sm501_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
sm501_gpio_ensure_gpio(smchip, bit);
spin_unlock_irqrestore(&smgpio->lock, save);
+
+ return 0;
}
static int sm501_gpio_input(struct gpio_chip *chip, unsigned offset)
@@ -1005,7 +965,7 @@ static const struct gpio_chip gpio_chip_template = {
.ngpio = 32,
.direction_input = sm501_gpio_input,
.direction_output = sm501_gpio_output,
- .set = sm501_gpio_set,
+ .set_rv = sm501_gpio_set,
.get = sm501_gpio_get,
};
diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c
index 7186e2108108..d6b4350779e6 100644
--- a/drivers/mfd/sprd-sc27xx-spi.c
+++ b/drivers/mfd/sprd-sc27xx-spi.c
@@ -210,7 +210,10 @@ static int sprd_pmic_probe(struct spi_device *spi)
return ret;
}
- device_init_wakeup(&spi->dev, true);
+ ret = devm_device_init_wakeup(&spi->dev);
+ if (ret)
+ return dev_err_probe(&spi->dev, ret, "Failed to init wakeup\n");
+
return 0;
}
diff --git a/drivers/mfd/stm32-lptimer.c b/drivers/mfd/stm32-lptimer.c
index b2704a9809c7..09073dbc9c80 100644
--- a/drivers/mfd/stm32-lptimer.c
+++ b/drivers/mfd/stm32-lptimer.c
@@ -6,6 +6,7 @@
* Inspired by Benjamin Gaignard's stm32-timers driver
*/
+#include <linux/bitfield.h>
#include <linux/mfd/stm32-lptimer.h>
#include <linux/module.h>
#include <linux/of_platform.h>
@@ -49,6 +50,36 @@ static int stm32_lptimer_detect_encoder(struct stm32_lptimer *ddata)
return 0;
}
+static int stm32_lptimer_detect_hwcfgr(struct stm32_lptimer *ddata)
+{
+ u32 val;
+ int ret;
+
+ ret = regmap_read(ddata->regmap, STM32_LPTIM_VERR, &ddata->version);
+ if (ret)
+ return ret;
+
+ /* Try to guess parameters from HWCFGR: e.g. encoder mode (STM32MP15) */
+ ret = regmap_read(ddata->regmap, STM32_LPTIM_HWCFGR1, &val);
+ if (ret)
+ return ret;
+
+ /* Fallback to legacy init if HWCFGR isn't present */
+ if (!val)
+ return stm32_lptimer_detect_encoder(ddata);
+
+ ddata->has_encoder = FIELD_GET(STM32_LPTIM_HWCFGR1_ENCODER, val);
+
+ ret = regmap_read(ddata->regmap, STM32_LPTIM_HWCFGR2, &val);
+ if (ret)
+ return ret;
+
+ /* Number of capture/compare channels */
+ ddata->num_cc_chans = FIELD_GET(STM32_LPTIM_HWCFGR2_CHAN_NUM, val);
+
+ return 0;
+}
+
static int stm32_lptimer_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -73,7 +104,7 @@ static int stm32_lptimer_probe(struct platform_device *pdev)
if (IS_ERR(ddata->clk))
return PTR_ERR(ddata->clk);
- ret = stm32_lptimer_detect_encoder(ddata);
+ ret = stm32_lptimer_detect_hwcfgr(ddata);
if (ret)
return ret;
diff --git a/drivers/mfd/stmpe-spi.c b/drivers/mfd/stmpe-spi.c
index 792236f56399..b9cc85ea2c40 100644
--- a/drivers/mfd/stmpe-spi.c
+++ b/drivers/mfd/stmpe-spi.c
@@ -129,7 +129,7 @@ static const struct spi_device_id stmpe_spi_id[] = {
{ "stmpe2403", STMPE2403 },
{ }
};
-MODULE_DEVICE_TABLE(spi, stmpe_id);
+MODULE_DEVICE_TABLE(spi, stmpe_spi_id);
static struct spi_driver stmpe_spi_driver = {
.driver = {
diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c
index 00fb12c4f491..03bd5cd66798 100644
--- a/drivers/mfd/tps65010.c
+++ b/drivers/mfd/tps65010.c
@@ -446,7 +446,7 @@ static irqreturn_t tps65010_irq(int irq, void *_tps)
* offsets 4..5 == LED1/nPG, LED2 (we set one of the non-BLINK modes)
* offset 6 == vibrator motor driver
*/
-static void
+static int
tps65010_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
if (offset < 4)
@@ -455,6 +455,8 @@ tps65010_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
tps65010_set_led(offset - 3, value ? ON : OFF);
else
tps65010_set_vib(value);
+
+ return 0;
}
static int
@@ -512,7 +514,6 @@ static void tps65010_remove(struct i2c_client *client)
if (client->irq > 0)
free_irq(client->irq, tps);
cancel_delayed_work_sync(&tps->work);
- debugfs_remove(tps->file);
the_tps = NULL;
}
@@ -608,7 +609,7 @@ static int tps65010_probe(struct i2c_client *client)
tps65010_work(&tps->work.work);
- tps->file = debugfs_create_file(DRIVER_NAME, S_IRUGO, NULL,
+ tps->file = debugfs_create_file(DRIVER_NAME, S_IRUGO, client->debugfs,
tps, DEBUG_FOPS);
/* optionally register GPIOs */
@@ -619,7 +620,7 @@ static int tps65010_probe(struct i2c_client *client)
tps->chip.parent = &client->dev;
tps->chip.owner = THIS_MODULE;
- tps->chip.set = tps65010_gpio_set;
+ tps->chip.set_rv = tps65010_gpio_set;
tps->chip.direction_output = tps65010_output;
/* NOTE: only partial support for inputs; nyet IRQs */
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index fc4d4c844a81..fd71ba29f6b5 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -104,7 +104,8 @@ unsigned int ucb1x00_io_read(struct ucb1x00 *ucb)
return ucb1x00_reg_read(ucb, UCB_IO_DATA);
}
-static void ucb1x00_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int ucb1x00_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct ucb1x00 *ucb = gpiochip_get_data(chip);
unsigned long flags;
@@ -119,6 +120,8 @@ static void ucb1x00_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
ucb1x00_disable(ucb);
spin_unlock_irqrestore(&ucb->io_lock, flags);
+
+ return 0;
}
static int ucb1x00_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -567,7 +570,7 @@ static int ucb1x00_probe(struct mcp *mcp)
ucb->gpio.owner = THIS_MODULE;
ucb->gpio.base = pdata->gpio_base;
ucb->gpio.ngpio = 10;
- ucb->gpio.set = ucb1x00_gpio_set;
+ ucb->gpio.set_rv = ucb1x00_gpio_set;
ucb->gpio.get = ucb1x00_gpio_get;
ucb->gpio.direction_input = ucb1x00_gpio_direction_input;
ucb->gpio.direction_output = ucb1x00_gpio_direction_output;
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index ff2f9e55ef28..aed653ce8fa2 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -98,7 +98,7 @@ config MTD_MCHP48L640
config MTD_SPEAR_SMI
tristate "SPEAR MTD NOR Support through SMI controller"
depends on PLAT_SPEAR || COMPILE_TEST
- default y
+ default PLAT_SPEAR
help
This enable SNOR support on SPEAR platforms using SMI controller
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 8dc4f5c493fc..391d81ad960c 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -559,7 +559,7 @@ static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
/* Sanitize user input */
p.devname[BLKPG_DEVNAMELTH - 1] = '\0';
- return mtd_add_partition(mtd, p.devname, p.start, p.length);
+ return mtd_add_partition(mtd, p.devname, p.start, p.length, NULL);
case BLKPG_DEL_PARTITION:
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 5ba9a741f5ac..429d8c16baf0 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -68,7 +68,13 @@ static struct class mtd_class = {
.pm = MTD_CLS_PM_OPS,
};
+static struct class mtd_master_class = {
+ .name = "mtd_master",
+ .pm = MTD_CLS_PM_OPS,
+};
+
static DEFINE_IDR(mtd_idr);
+static DEFINE_IDR(mtd_master_idr);
/* These are exported solely for the purpose of mtd_blkdevs.c. You
should not use them for _anything_ else */
@@ -83,8 +89,9 @@ EXPORT_SYMBOL_GPL(__mtd_next_device);
static LIST_HEAD(mtd_notifiers);
-
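+/* Number of char-dev minors reserved for unpartitioned "master" devices */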
+#define MTD_MASTER_DEVS 255
#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
+static dev_t mtd_master_devt;
/* REVISIT once MTD uses the driver model better, whoever allocates
* the mtd_info will probably want to use the release() hook...
@@ -104,6 +111,17 @@ static void mtd_release(struct device *dev)
device_destroy(&mtd_class, index + 1);
}
+static void mtd_master_release(struct device *dev)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ idr_remove(&mtd_master_idr, mtd->index);
+ of_node_put(mtd_get_of_node(mtd));
+
+ if (mtd_is_partition(mtd))
+ release_mtd_partition(mtd);
+}
+
static void mtd_device_release(struct kref *kref)
{
struct mtd_info *mtd = container_of(kref, struct mtd_info, refcnt);
@@ -367,6 +385,11 @@ static const struct device_type mtd_devtype = {
.release = mtd_release,
};
+static const struct device_type mtd_master_devtype = {
+ .name = "mtd_master",
+ .release = mtd_master_release,
+};
+
static bool mtd_expert_analysis_mode;
#ifdef CONFIG_DEBUG_FS
@@ -634,13 +657,13 @@ exit_parent:
/**
* add_mtd_device - register an MTD device
* @mtd: pointer to new MTD device info structure
+ * @partitioned: register the device in the partitioned (mtdX) namespace
*
* Add a device to the list of MTD devices present in the system, and
* notify each currently active MTD 'user' of its arrival. Returns
* zero on success or non-zero on failure.
*/
-
-int add_mtd_device(struct mtd_info *mtd)
+int add_mtd_device(struct mtd_info *mtd, bool partitioned)
{
struct device_node *np = mtd_get_of_node(mtd);
struct mtd_info *master = mtd_get_master(mtd);
@@ -687,10 +710,17 @@ int add_mtd_device(struct mtd_info *mtd)
ofidx = -1;
if (np)
ofidx = of_alias_get_id(np, "mtd");
- if (ofidx >= 0)
- i = idr_alloc(&mtd_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL);
- else
- i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
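+ /* Masters live in their own idr so their indices never collide with mtdN partitions */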
+ if (partitioned) {
+ if (ofidx >= 0)
+ i = idr_alloc(&mtd_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL);
+ else
+ i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
+ } else {
+ if (ofidx >= 0)
+ i = idr_alloc(&mtd_master_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL);
+ else
+ i = idr_alloc(&mtd_master_idr, mtd, 0, 0, GFP_KERNEL);
+ }
if (i < 0) {
error = i;
goto fail_locked;
@@ -738,10 +768,18 @@ int add_mtd_device(struct mtd_info *mtd)
/* Caller should have set dev.parent to match the
* physical device, if appropriate.
*/
- mtd->dev.type = &mtd_devtype;
- mtd->dev.class = &mtd_class;
- mtd->dev.devt = MTD_DEVT(i);
- error = dev_set_name(&mtd->dev, "mtd%d", i);
+ if (partitioned) {
+ mtd->dev.type = &mtd_devtype;
+ mtd->dev.class = &mtd_class;
+ mtd->dev.devt = MTD_DEVT(i);
+ error = dev_set_name(&mtd->dev, "mtd%d", i);
+ } else {
+ mtd->dev.type = &mtd_master_devtype;
+ mtd->dev.class = &mtd_master_class;
+ mtd->dev.devt = MKDEV(MAJOR(mtd_master_devt), i);
+ error = dev_set_name(&mtd->dev, "mtd_master%d", i);
+ }
if (error)
goto fail_devname;
dev_set_drvdata(&mtd->dev, mtd);
@@ -749,6 +787,7 @@ int add_mtd_device(struct mtd_info *mtd)
of_node_get(mtd_get_of_node(mtd));
error = device_register(&mtd->dev);
if (error) {
+ pr_err("mtd: %s device_register fail %d\n", mtd->name, error);
put_device(&mtd->dev);
goto fail_added;
}
@@ -760,10 +799,13 @@ int add_mtd_device(struct mtd_info *mtd)
mtd_debugfs_populate(mtd);
- device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
- "mtd%dro", i);
+ if (partitioned) {
+ device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
+ "mtd%dro", i);
+ }
- pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
+ pr_debug("mtd: Giving out %spartitioned device %d to %s\n",
+ partitioned ? "" : "un-", i, mtd->name);
/* No need to get a refcount on the module containing
the notifier, since we hold the mtd_table_mutex */
list_for_each_entry(not, &mtd_notifiers, list)
@@ -771,13 +813,16 @@ int add_mtd_device(struct mtd_info *mtd)
mutex_unlock(&mtd_table_mutex);
- if (of_property_read_bool(mtd_get_of_node(mtd), "linux,rootfs")) {
- if (IS_BUILTIN(CONFIG_MTD)) {
- pr_info("mtd: setting mtd%d (%s) as root device\n", mtd->index, mtd->name);
- ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
- } else {
- pr_warn("mtd: can't set mtd%d (%s) as root device - mtd must be builtin\n",
- mtd->index, mtd->name);
+ if (partitioned) {
+ if (of_property_read_bool(mtd_get_of_node(mtd), "linux,rootfs")) {
+ if (IS_BUILTIN(CONFIG_MTD)) {
+ pr_info("mtd: setting mtd%d (%s) as root device\n",
+ mtd->index, mtd->name);
+ ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
+ } else {
+ pr_warn("mtd: can't set mtd%d (%s) as root device - mtd must be builtin\n",
+ mtd->index, mtd->name);
+ }
}
}
@@ -793,7 +838,10 @@ fail_nvmem_add:
fail_added:
of_node_put(mtd_get_of_node(mtd));
fail_devname:
- idr_remove(&mtd_idr, i);
+ if (partitioned)
+ idr_remove(&mtd_idr, i);
+ else
+ idr_remove(&mtd_master_idr, i);
fail_locked:
mutex_unlock(&mtd_table_mutex);
return error;
@@ -811,12 +859,14 @@ fail_locked:
int del_mtd_device(struct mtd_info *mtd)
{
- int ret;
struct mtd_notifier *not;
+ struct idr *idr;
+ int ret;
mutex_lock(&mtd_table_mutex);
- if (idr_find(&mtd_idr, mtd->index) != mtd) {
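+ /* Pick the idr the device was registered in: partitions in mtd_idr, masters in mtd_master_idr */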
+ idr = mtd->dev.class == &mtd_class ? &mtd_idr : &mtd_master_idr;
+ if (idr_find(idr, mtd->index) != mtd) {
ret = -ENODEV;
goto out_error;
}
@@ -1056,6 +1106,7 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
const struct mtd_partition *parts,
int nr_parts)
{
+ struct mtd_info *parent;
int ret, err;
mtd_set_dev_defaults(mtd);
@@ -1064,25 +1115,30 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
if (ret)
goto out;
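+ /* The master (unpartitioned) device is now always registered first */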
+ ret = add_mtd_device(mtd, false);
+ if (ret)
+ goto out;
+
if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
- ret = add_mtd_device(mtd);
+ ret = mtd_add_partition(mtd, mtd->name, 0, MTDPART_SIZ_FULL, &parent);
if (ret)
goto out;
+
+ } else {
+ parent = mtd;
}
/* Prefer parsed partitions over driver-provided fallback */
- ret = parse_mtd_partitions(mtd, types, parser_data);
+ ret = parse_mtd_partitions(parent, types, parser_data);
if (ret == -EPROBE_DEFER)
goto out;
if (ret > 0)
ret = 0;
else if (nr_parts)
- ret = add_mtd_partitions(mtd, parts, nr_parts);
- else if (!device_is_registered(&mtd->dev))
- ret = add_mtd_device(mtd);
- else
- ret = 0;
+ ret = add_mtd_partitions(parent, parts, nr_parts);
+ else if (!IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
+ ret = mtd_add_partition(parent, mtd->name, 0, MTDPART_SIZ_FULL, NULL);
if (ret)
goto out;
@@ -1102,13 +1158,14 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
register_reboot_notifier(&mtd->reboot_notifier);
}
+ return 0;
out:
- if (ret) {
- nvmem_unregister(mtd->otp_user_nvmem);
- nvmem_unregister(mtd->otp_factory_nvmem);
- }
+ nvmem_unregister(mtd->otp_user_nvmem);
+ nvmem_unregister(mtd->otp_factory_nvmem);
- if (ret && device_is_registered(&mtd->dev)) {
+ del_mtd_partitions(mtd);
+
+ if (device_is_registered(&mtd->dev)) {
err = del_mtd_device(mtd);
if (err)
pr_err("Error when deleting MTD device (%d)\n", err);
@@ -1267,8 +1324,7 @@ int __get_mtd_device(struct mtd_info *mtd)
mtd = mtd->parent;
}
- if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
- kref_get(&master->refcnt);
+ kref_get(&master->refcnt);
return 0;
}
@@ -1362,8 +1418,7 @@ void __put_mtd_device(struct mtd_info *mtd)
mtd = parent;
}
- if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
- kref_put(&master->refcnt, mtd_device_release);
+ kref_put(&master->refcnt, mtd_device_release);
module_put(master->owner);
@@ -2530,6 +2585,16 @@ static int __init init_mtd(void)
if (ret)
goto err_reg;
+ ret = class_register(&mtd_master_class);
+ if (ret)
+ goto err_reg2;
+
+ ret = alloc_chrdev_region(&mtd_master_devt, 0, MTD_MASTER_DEVS, "mtd_master");
+ if (ret < 0) {
+ pr_err("unable to allocate char dev region\n");
+ goto err_chrdev;
+ }
+
mtd_bdi = mtd_bdi_init("mtd");
if (IS_ERR(mtd_bdi)) {
ret = PTR_ERR(mtd_bdi);
@@ -2554,6 +2619,10 @@ out_procfs:
bdi_unregister(mtd_bdi);
bdi_put(mtd_bdi);
err_bdi:
+ unregister_chrdev_region(mtd_master_devt, MTD_MASTER_DEVS);
+err_chrdev:
+ class_unregister(&mtd_master_class);
+err_reg2:
class_unregister(&mtd_class);
err_reg:
pr_err("Error registering mtd class or bdi: %d\n", ret);
@@ -2567,9 +2636,12 @@ static void __exit cleanup_mtd(void)
if (proc_mtd)
remove_proc_entry("mtd", NULL);
class_unregister(&mtd_class);
+ class_unregister(&mtd_master_class);
+ unregister_chrdev_region(mtd_master_devt, MTD_MASTER_DEVS);
bdi_unregister(mtd_bdi);
bdi_put(mtd_bdi);
idr_destroy(&mtd_idr);
+ idr_destroy(&mtd_master_idr);
}
module_init(init_mtd);
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h
index b014861a06a6..2258d31c5aa6 100644
--- a/drivers/mtd/mtdcore.h
+++ b/drivers/mtd/mtdcore.h
@@ -8,7 +8,7 @@ extern struct mutex mtd_table_mutex;
extern struct backing_dev_info *mtd_bdi;
struct mtd_info *__mtd_next_device(int i);
-int __must_check add_mtd_device(struct mtd_info *mtd);
+int __must_check add_mtd_device(struct mtd_info *mtd, bool partitioned);
int del_mtd_device(struct mtd_info *mtd);
int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
int del_mtd_partitions(struct mtd_info *);
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 994e8c51e674..5a3db36d734e 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -86,8 +86,7 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent,
* parent conditional on that option. Note, this is a way to
* distinguish between the parent and its partitions in sysfs.
*/
- child->dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) || mtd_is_partition(parent) ?
- &parent->dev : parent->dev.parent;
+ child->dev.parent = &parent->dev;
child->dev.of_node = part->of_node;
child->parent = parent;
child->part.offset = part->offset;
@@ -243,7 +242,7 @@ static int mtd_add_partition_attrs(struct mtd_info *new)
}
int mtd_add_partition(struct mtd_info *parent, const char *name,
- long long offset, long long length)
+ long long offset, long long length, struct mtd_info **out)
{
struct mtd_info *master = mtd_get_master(parent);
u64 parent_size = mtd_is_partition(parent) ?
@@ -276,12 +275,15 @@ int mtd_add_partition(struct mtd_info *parent, const char *name,
list_add_tail(&child->part.node, &parent->partitions);
mutex_unlock(&master->master.partitions_lock);
- ret = add_mtd_device(child);
+ ret = add_mtd_device(child, true);
if (ret)
goto err_remove_part;
mtd_add_partition_attrs(child);
+ if (out)
+ *out = child;
+
return 0;
err_remove_part:
@@ -413,7 +415,7 @@ int add_mtd_partitions(struct mtd_info *parent,
list_add_tail(&child->part.node, &parent->partitions);
mutex_unlock(&master->master.partitions_lock);
- ret = add_mtd_device(child);
+ ret = add_mtd_device(child, true);
if (ret) {
mutex_lock(&master->master.partitions_lock);
list_del(&child->part.node);
@@ -590,9 +592,6 @@ static int mtd_part_of_parse(struct mtd_info *master,
int ret, err = 0;
dev = &master->dev;
- /* Use parent device (controller) if the top level MTD is not registered */
- if (!IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) && !mtd_is_partition(master))
- dev = master->dev.parent;
np = mtd_get_of_node(master);
if (mtd_is_partition(master))
@@ -711,6 +710,7 @@ int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
if (ret < 0 && !err)
err = ret;
}
+
return err;
}
diff --git a/drivers/mtd/nand/ecc-mxic.c b/drivers/mtd/nand/ecc-mxic.c
index 56b56f726b99..1bf9a5a64b87 100644
--- a/drivers/mtd/nand/ecc-mxic.c
+++ b/drivers/mtd/nand/ecc-mxic.c
@@ -614,7 +614,7 @@ static int mxic_ecc_finish_io_req_external(struct nand_device *nand,
{
struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
- int nents, step, ret;
+ int nents, step, ret = 0;
if (req->mode == MTD_OPS_RAW)
return 0;
diff --git a/drivers/mtd/nand/qpic_common.c b/drivers/mtd/nand/qpic_common.c
index e0ed25b5afea..4dc4d65e7d32 100644
--- a/drivers/mtd/nand/qpic_common.c
+++ b/drivers/mtd/nand/qpic_common.c
@@ -236,21 +236,21 @@ int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
int i, ret;
struct bam_cmd_element *bam_ce_buffer;
struct bam_transaction *bam_txn = nandc->bam_txn;
+ u32 offset;
bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
/* fill the command desc */
for (i = 0; i < size; i++) {
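+ /* Each command element targets a NAND register; compute its address from the BAM offset */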
+ offset = nandc->props->bam_offset + reg_off + 4 * i;
if (read)
bam_prep_ce(&bam_ce_buffer[i],
- nandc_reg_phys(nandc, reg_off + 4 * i),
- BAM_READ_COMMAND,
+ offset, BAM_READ_COMMAND,
reg_buf_dma_addr(nandc,
(__le32 *)vaddr + i));
else
bam_prep_ce_le32(&bam_ce_buffer[i],
- nandc_reg_phys(nandc, reg_off + 4 * i),
- BAM_WRITE_COMMAND,
+ offset, BAM_WRITE_COMMAND,
*((__le32 *)vaddr + i));
}
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index b8035df8f732..4b99d9c422c3 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -34,7 +34,7 @@ config MTD_NAND_DENALI_DT
config MTD_NAND_AMS_DELTA
tristate "Amstrad E3 NAND controller"
depends on MACH_AMS_DELTA || COMPILE_TEST
- default y
+ default MACH_AMS_DELTA
help
Support for NAND flash on Amstrad E3 (Delta).
@@ -462,6 +462,13 @@ config MTD_NAND_NUVOTON_MA35
Enables support for the NAND controller found on
the Nuvoton MA35 series SoCs.
+config MTD_NAND_LOONGSON1
+ tristate "Loongson1 NAND controller"
+ depends on LOONGSON1_APB_DMA || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ Enables support for the NAND controller on Loongson1 SoCs.
+
comment "Misc"
config MTD_SM_COMMON
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
index 99e79c448847..711d043ad4f8 100644
--- a/drivers/mtd/nand/raw/Makefile
+++ b/drivers/mtd/nand/raw/Makefile
@@ -59,6 +59,7 @@ obj-$(CONFIG_MTD_NAND_ROCKCHIP) += rockchip-nand-controller.o
obj-$(CONFIG_MTD_NAND_PL35X) += pl35x-nand-controller.o
obj-$(CONFIG_MTD_NAND_RENESAS) += renesas-nand-controller.o
obj-$(CONFIG_MTD_NAND_NUVOTON_MA35) += nuvoton-ma35d1-nand-controller.o
+obj-$(CONFIG_MTD_NAND_LOONGSON1) += loongson1-nand-controller.o
nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o
nand-objs += nand_onfi.o
diff --git a/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
index 6487dfc64258..e532c3535b16 100644
--- a/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
+++ b/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
@@ -171,6 +171,7 @@ static void bcm47xxnflash_ops_bcm4706_cmd_ctrl(struct nand_chip *nand_chip,
{
struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
u32 code = 0;
+ int rc;
if (cmd == NAND_CMD_NONE)
return;
@@ -182,7 +183,9 @@ static void bcm47xxnflash_ops_bcm4706_cmd_ctrl(struct nand_chip *nand_chip,
if (cmd != NAND_CMD_RESET)
code |= NCTL_CSA;
- bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, code);
+ rc = bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, code);
+ if (rc)
+ pr_err("ctl_cmd didn't work with error %d\n", rc);
}
/* Default nand_select_chip calls cmd_ctrl, which is not used in BCM4706 */
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index 17f6d9723df9..62bdda3be92f 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -65,6 +65,7 @@ module_param(wp_on, int, 0444);
#define CMD_PARAMETER_READ 0x0e
#define CMD_PARAMETER_CHANGE_COL 0x0f
#define CMD_LOW_LEVEL_OP 0x10
+#define CMD_NOT_SUPPORTED 0xff
struct brcm_nand_dma_desc {
u32 next_desc;
@@ -101,7 +102,7 @@ struct brcm_nand_dma_desc {
#define BRCMNAND_MIN_DEVSIZE (4ULL * 1024 * 1024)
#define NAND_CTRL_RDY (INTFC_CTLR_READY | INTFC_FLASH_READY)
-#define NAND_POLL_STATUS_TIMEOUT_MS 100
+#define NAND_POLL_STATUS_TIMEOUT_MS 500
#define EDU_CMD_WRITE 0x00
#define EDU_CMD_READ 0x01
@@ -199,6 +200,30 @@ static const u16 flash_dma_regs_v4[] = {
[FLASH_DMA_CURRENT_DESC_EXT] = 0x34,
};
+/* Native command conversion for legacy controllers (< v5.0) */
+static const u8 native_cmd_conv[] = {
+ [NAND_CMD_READ0] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_READ1] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_RNDOUT] = CMD_PARAMETER_CHANGE_COL,
+ [NAND_CMD_PAGEPROG] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_READOOB] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_ERASE1] = CMD_BLOCK_ERASE,
+ [NAND_CMD_STATUS] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_SEQIN] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_RNDIN] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_READID] = CMD_DEVICE_ID_READ,
+ [NAND_CMD_ERASE2] = CMD_NULL,
+ [NAND_CMD_PARAM] = CMD_PARAMETER_READ,
+ [NAND_CMD_GET_FEATURES] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_SET_FEATURES] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_RESET] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_READSTART] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_READCACHESEQ] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_READCACHEEND] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_RNDOUTSTART] = CMD_NULL,
+ [NAND_CMD_CACHEDPROG] = CMD_NOT_SUPPORTED,
+};
+
/* Controller feature flags */
enum {
BRCMNAND_HAS_1K_SECTORS = BIT(0),
@@ -237,6 +262,12 @@ struct brcmnand_controller {
/* List of NAND hosts (one for each chip-select) */
struct list_head host_list;
+ /* Functions to be called from exec_op */
+ int (*check_instr)(struct nand_chip *chip,
+ const struct nand_operation *op);
+ int (*exec_instr)(struct nand_chip *chip,
+ const struct nand_operation *op);
+
/* EDU info, per-transaction */
const u16 *edu_offsets;
void __iomem *edu_base;
@@ -310,9 +341,6 @@ struct brcmnand_host {
struct platform_device *pdev;
int cs;
- unsigned int last_cmd;
- unsigned int last_byte;
- u64 last_addr;
struct brcmnand_cfg hwcfg;
struct brcmnand_controller *ctrl;
};
@@ -2233,14 +2261,11 @@ static int brcmnand_read_page(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- struct brcmnand_host *host = nand_get_controller_data(chip);
u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
u64 addr = (u64)page << chip->page_shift;
- host->last_addr = addr;
-
- return brcmnand_read(mtd, chip, host->last_addr,
- mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
+ return brcmnand_read(mtd, chip, addr, mtd->writesize >> FC_SHIFT,
+ (u32 *)buf, oob);
}
static int brcmnand_read_page_raw(struct nand_chip *chip, uint8_t *buf,
@@ -2252,11 +2277,9 @@ static int brcmnand_read_page_raw(struct nand_chip *chip, uint8_t *buf,
int ret;
u64 addr = (u64)page << chip->page_shift;
- host->last_addr = addr;
-
brcmnand_set_ecc_enabled(host, 0);
- ret = brcmnand_read(mtd, chip, host->last_addr,
- mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
+ ret = brcmnand_read(mtd, chip, addr, mtd->writesize >> FC_SHIFT,
+ (u32 *)buf, oob);
brcmnand_set_ecc_enabled(host, 1);
return ret;
}
@@ -2363,13 +2386,10 @@ static int brcmnand_write_page(struct nand_chip *chip, const uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- struct brcmnand_host *host = nand_get_controller_data(chip);
void *oob = oob_required ? chip->oob_poi : NULL;
u64 addr = (u64)page << chip->page_shift;
- host->last_addr = addr;
-
- return brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
+ return brcmnand_write(mtd, chip, addr, (const u32 *)buf, oob);
}
static int brcmnand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
@@ -2381,9 +2401,8 @@ static int brcmnand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
u64 addr = (u64)page << chip->page_shift;
int ret = 0;
- host->last_addr = addr;
brcmnand_set_ecc_enabled(host, 0);
- ret = brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
+ ret = brcmnand_write(mtd, chip, addr, (const u32 *)buf, oob);
brcmnand_set_ecc_enabled(host, 1);
return ret;
@@ -2490,18 +2509,190 @@ static int brcmnand_op_is_reset(const struct nand_operation *op)
return 0;
}
+static int brcmnand_check_instructions(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ return 0;
+}
+
+static int brcmnand_exec_instructions(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < op->ninstrs; i++) {
+ ret = brcmnand_exec_instr(host, i, op);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int brcmnand_check_instructions_legacy(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ const struct nand_op_instr *instr;
+ unsigned int i;
+ u8 cmd;
+
+ for (i = 0; i < op->ninstrs; i++) {
+ instr = &op->instrs[i];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ cmd = native_cmd_conv[instr->ctx.cmd.opcode];
+ if (cmd == CMD_NOT_SUPPORTED)
+ return -EOPNOTSUPP;
+ break;
+ case NAND_OP_ADDR_INSTR:
+ case NAND_OP_DATA_IN_INSTR:
+ case NAND_OP_WAITRDY_INSTR:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int brcmnand_exec_instructions_legacy(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_controller *ctrl = host->ctrl;
+ const struct nand_op_instr *instr;
+ unsigned int i, j;
+ u8 cmd = CMD_NULL, last_cmd = CMD_NULL;
+ int ret = 0;
+ u64 last_addr;
+
+ for (i = 0; i < op->ninstrs; i++) {
+ instr = &op->instrs[i];
+
+ if (instr->type == NAND_OP_CMD_INSTR) {
+ cmd = native_cmd_conv[instr->ctx.cmd.opcode];
+ if (cmd == CMD_NOT_SUPPORTED) {
+ dev_err(ctrl->dev, "unsupported cmd=%d\n",
+ instr->ctx.cmd.opcode);
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ } else if (instr->type == NAND_OP_ADDR_INSTR) {
+ u64 addr = 0;
+
+ if (cmd == CMD_NULL)
+ continue;
+
+ if (instr->ctx.addr.naddrs > 8) {
+ dev_err(ctrl->dev, "unsupported naddrs=%u\n",
+ instr->ctx.addr.naddrs);
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
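+ /* Assemble the address from LSB-first address-cycle bytes */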
+ for (j = 0; j < instr->ctx.addr.naddrs; j++)
+ addr |= (instr->ctx.addr.addrs[j]) << (j << 3);
+
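+ /* Erase takes a page address, so scale to bytes; change-column must stay flash-cache aligned */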
+ if (cmd == CMD_BLOCK_ERASE)
+ addr <<= chip->page_shift;
+ else if (cmd == CMD_PARAMETER_CHANGE_COL)
+ addr &= ~((u64)(FC_BYTES - 1));
+
+ brcmnand_set_cmd_addr(mtd, addr);
+ brcmnand_send_cmd(host, cmd);
+ last_addr = addr;
+ last_cmd = cmd;
+ cmd = CMD_NULL;
+ brcmnand_waitfunc(chip);
+
+ if (last_cmd == CMD_PARAMETER_READ ||
+ last_cmd == CMD_PARAMETER_CHANGE_COL) {
+ /* Copy flash cache word-wise */
+ u32 *flash_cache = (u32 *)ctrl->flash_cache;
+
+ brcmnand_soc_data_bus_prepare(ctrl->soc, true);
+
+ /*
+ * Must cache the FLASH_CACHE now, since changes in
+ * SECTOR_SIZE_1K may invalidate it
+ */
+ for (j = 0; j < FC_WORDS; j++)
+ /*
+ * Flash cache is big endian for parameter pages, at
+ * least on STB SoCs
+ */
+ flash_cache[j] = be32_to_cpu(brcmnand_read_fc(ctrl, j));
+
+ brcmnand_soc_data_bus_unprepare(ctrl->soc, true);
+ }
+ } else if (instr->type == NAND_OP_DATA_IN_INSTR) {
+ u8 *in = instr->ctx.data.buf.in;
+
+ if (last_cmd == CMD_DEVICE_ID_READ) {
+ u32 val;
+
+ if (instr->ctx.data.len > 8) {
+ dev_err(ctrl->dev, "unsupported len=%u\n",
+ instr->ctx.data.len);
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
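+ /* The controller packs ID bytes MSB-first in the ID and ID_EXT registers */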
+ for (j = 0; j < instr->ctx.data.len; j++) {
+ if (j == 0)
+ val = brcmnand_read_reg(ctrl, BRCMNAND_ID);
+ else if (j == 4)
+ val = brcmnand_read_reg(ctrl, BRCMNAND_ID_EXT);
+
+ in[j] = (val >> (24 - ((j % 4) << 3))) & 0xff;
+ }
+ } else if (last_cmd == CMD_PARAMETER_READ ||
+ last_cmd == CMD_PARAMETER_CHANGE_COL) {
+ u64 addr;
+ u32 offs;
+
+ for (j = 0; j < instr->ctx.data.len; j++) {
+ addr = last_addr + j;
+ offs = addr & (FC_BYTES - 1);
+
+ if (j > 0 && offs == 0)
+ nand_change_read_column_op(chip, addr, NULL, 0,
+ false);
+
+ in[j] = ctrl->flash_cache[offs];
+ }
+ }
+ } else if (instr->type == NAND_OP_WAITRDY_INSTR) {
+ ret = bcmnand_ctrl_poll_status(host, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
+ if (ret)
+ break;
+ } else {
+ dev_err(ctrl->dev, "unsupported instruction type: %d\n", instr->type);
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ }
+
+ return ret;
+}
+
static int brcmnand_exec_op(struct nand_chip *chip,
const struct nand_operation *op,
bool check_only)
{
struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_controller *ctrl = host->ctrl;
struct mtd_info *mtd = nand_to_mtd(chip);
u8 *status;
- unsigned int i;
int ret = 0;
if (check_only)
- return 0;
+ return ctrl->check_instr(chip, op);
if (brcmnand_op_is_status(op)) {
status = op->instrs[1].ctx.data.buf.in;
@@ -2525,11 +2716,7 @@ static int brcmnand_exec_op(struct nand_chip *chip,
if (op->deassert_wp)
brcmnand_wp(mtd, 0);
- for (i = 0; i < op->ninstrs; i++) {
- ret = brcmnand_exec_instr(host, i, op);
- if (ret)
- break;
- }
+ ret = ctrl->exec_instr(chip, op);
if (op->deassert_wp)
brcmnand_wp(mtd, 1);
@@ -3142,6 +3329,15 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
if (ret)
goto err;
+ /* Only v5.0+ controllers have low level ops support */
+ if (ctrl->nand_version >= 0x0500) {
+ ctrl->check_instr = brcmnand_check_instructions;
+ ctrl->exec_instr = brcmnand_exec_instructions;
+ } else {
+ ctrl->check_instr = brcmnand_check_instructions_legacy;
+ ctrl->exec_instr = brcmnand_exec_instructions_legacy;
+ }
+
/*
* Most chips have this cache at a fixed offset within 'nand' block.
* Some must specify this region separately.
diff --git a/drivers/mtd/nand/raw/denali_pci.c b/drivers/mtd/nand/raw/denali_pci.c
index e22094e39546..97fa32d73441 100644
--- a/drivers/mtd/nand/raw/denali_pci.c
+++ b/drivers/mtd/nand/raw/denali_pci.c
@@ -68,7 +68,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
denali->clk_rate = 50000000; /* 50 MHz */
denali->clk_x_rate = 200000000; /* 200 MHz */
- ret = pci_request_regions(dev, DENALI_NAND_NAME);
+ ret = pcim_request_all_regions(dev, DENALI_NAND_NAME);
if (ret) {
dev_err(&dev->dev, "Spectra: Unable to request memory regions\n");
return ret;
@@ -77,20 +77,18 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
denali->reg = devm_ioremap(denali->dev, csr_base, csr_len);
if (!denali->reg) {
dev_err(&dev->dev, "Spectra: Unable to remap memory region\n");
- ret = -ENOMEM;
- goto regions_release;
+ return -ENOMEM;
}
denali->host = devm_ioremap(denali->dev, mem_base, mem_len);
if (!denali->host) {
dev_err(&dev->dev, "Spectra: ioremap failed!");
- ret = -ENOMEM;
- goto regions_release;
+ return -ENOMEM;
}
ret = denali_init(denali);
if (ret)
- goto regions_release;
+ return ret;
nsels = denali->nbanks;
@@ -118,8 +116,6 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
out_remove_denali:
denali_remove(denali);
-regions_release:
- pci_release_regions(dev);
return ret;
}
@@ -127,7 +123,6 @@ static void denali_pci_remove(struct pci_dev *dev)
{
struct denali_controller *denali = pci_get_drvdata(dev);
- pci_release_regions(dev);
denali_remove(denali);
}
diff --git a/drivers/mtd/nand/raw/loongson1-nand-controller.c b/drivers/mtd/nand/raw/loongson1-nand-controller.c
new file mode 100644
index 000000000000..ef8e4f9ce287
--- /dev/null
+++ b/drivers/mtd/nand/raw/loongson1-nand-controller.c
@@ -0,0 +1,836 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * NAND Controller Driver for Loongson-1 SoC
+ *
+ * Copyright (C) 2015-2025 Keguang Zhang <keguang.zhang@gmail.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/sizes.h>
+
+/* Loongson-1 NAND Controller Registers */
+#define LS1X_NAND_CMD 0x0
+#define LS1X_NAND_ADDR1 0x4
+#define LS1X_NAND_ADDR2 0x8
+#define LS1X_NAND_TIMING 0xc
+#define LS1X_NAND_IDL 0x10
+#define LS1X_NAND_IDH_STATUS 0x14
+#define LS1X_NAND_PARAM 0x18
+#define LS1X_NAND_OP_NUM 0x1c
+
+/* NAND Command Register Bits */
+#define LS1X_NAND_CMD_OP_DONE BIT(10)
+#define LS1X_NAND_CMD_OP_SPARE BIT(9)
+#define LS1X_NAND_CMD_OP_MAIN BIT(8)
+#define LS1X_NAND_CMD_STATUS BIT(7)
+#define LS1X_NAND_CMD_RESET BIT(6)
+#define LS1X_NAND_CMD_READID BIT(5)
+#define LS1X_NAND_CMD_BLOCKS_ERASE BIT(4)
+#define LS1X_NAND_CMD_ERASE BIT(3)
+#define LS1X_NAND_CMD_WRITE BIT(2)
+#define LS1X_NAND_CMD_READ BIT(1)
+#define LS1X_NAND_CMD_VALID BIT(0)
+
+#define LS1X_NAND_WAIT_CYCLE_MASK GENMASK(7, 0)
+#define LS1X_NAND_HOLD_CYCLE_MASK GENMASK(15, 8)
+#define LS1X_NAND_CELL_SIZE_MASK GENMASK(11, 8)
+
+#define LS1X_NAND_COL_ADDR_CYC 2U
+#define LS1X_NAND_MAX_ADDR_CYC 5U
+
+#define BITS_PER_WORD (4 * BITS_PER_BYTE)
+
+struct ls1x_nand_host;
+
+struct ls1x_nand_op {
+ char addrs[LS1X_NAND_MAX_ADDR_CYC];
+ unsigned int naddrs;
+ unsigned int addrs_offset;
+ unsigned int aligned_offset;
+ unsigned int cmd_reg;
+ unsigned int row_start;
+ unsigned int rdy_timeout_ms;
+ unsigned int orig_len;
+ bool is_readid;
+ bool is_erase;
+ bool is_write;
+ bool is_read;
+ bool is_change_column;
+ size_t len;
+ char *buf;
+};
+
+struct ls1x_nand_data {
+ unsigned int status_field;
+ unsigned int op_scope_field;
+ unsigned int hold_cycle;
+ unsigned int wait_cycle;
+ void (*set_addr)(struct ls1x_nand_host *host, struct ls1x_nand_op *op);
+};
+
+struct ls1x_nand_host {
+ struct device *dev;
+ struct nand_chip chip;
+ struct nand_controller controller;
+ const struct ls1x_nand_data *data;
+ void __iomem *reg_base;
+ struct regmap *regmap;
+ /* DMA Engine stuff */
+ dma_addr_t dma_base;
+ struct dma_chan *dma_chan;
+ dma_cookie_t dma_cookie;
+ struct completion dma_complete;
+};
+
+static const struct regmap_config ls1x_nand_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static int ls1x_nand_op_cmd_mapping(struct nand_chip *chip, struct ls1x_nand_op *op, u8 opcode)
+{
+ struct ls1x_nand_host *host = nand_get_controller_data(chip);
+
+ op->row_start = chip->page_shift + 1;
+
+ /* The controller abstracts the following NAND operations. */
+ switch (opcode) {
+ case NAND_CMD_STATUS:
+ op->cmd_reg = LS1X_NAND_CMD_STATUS;
+ break;
+ case NAND_CMD_RESET:
+ op->cmd_reg = LS1X_NAND_CMD_RESET;
+ break;
+ case NAND_CMD_READID:
+ op->is_readid = true;
+ op->cmd_reg = LS1X_NAND_CMD_READID;
+ break;
+ case NAND_CMD_ERASE1:
+ op->is_erase = true;
+ op->addrs_offset = LS1X_NAND_COL_ADDR_CYC;
+ break;
+ case NAND_CMD_ERASE2:
+ if (!op->is_erase)
+ return -EOPNOTSUPP;
+ /* During erasing, row_start differs from the default value. */
+ op->row_start = chip->page_shift;
+ op->cmd_reg = LS1X_NAND_CMD_ERASE;
+ break;
+ case NAND_CMD_SEQIN:
+ op->is_write = true;
+ break;
+ case NAND_CMD_PAGEPROG:
+ if (!op->is_write)
+ return -EOPNOTSUPP;
+ op->cmd_reg = LS1X_NAND_CMD_WRITE;
+ break;
+ case NAND_CMD_READ0:
+ op->is_read = true;
+ break;
+ case NAND_CMD_READSTART:
+ if (!op->is_read)
+ return -EOPNOTSUPP;
+ op->cmd_reg = LS1X_NAND_CMD_READ;
+ break;
+ case NAND_CMD_RNDOUT:
+ op->is_change_column = true;
+ break;
+ case NAND_CMD_RNDOUTSTART:
+ if (!op->is_change_column)
+ return -EOPNOTSUPP;
+ op->cmd_reg = LS1X_NAND_CMD_READ;
+ break;
+ default:
+ dev_dbg(host->dev, "unsupported opcode: %u\n", opcode);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int ls1x_nand_parse_instructions(struct nand_chip *chip,
+ const struct nand_subop *subop, struct ls1x_nand_op *op)
+{
+ unsigned int op_id;
+ int ret;
+
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ const struct nand_op_instr *instr = &subop->instrs[op_id];
+ unsigned int offset, naddrs;
+ const u8 *addrs;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ ret = ls1x_nand_op_cmd_mapping(chip, op, instr->ctx.cmd.opcode);
+ if (ret < 0)
+ return ret;
+
+ break;
+ case NAND_OP_ADDR_INSTR:
+ naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ if (naddrs > LS1X_NAND_MAX_ADDR_CYC)
+ return -EOPNOTSUPP;
+ op->naddrs = naddrs;
+ offset = nand_subop_get_addr_start_off(subop, op_id);
+ addrs = &instr->ctx.addr.addrs[offset];
+ memcpy(op->addrs + op->addrs_offset, addrs, naddrs);
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ case NAND_OP_DATA_OUT_INSTR:
+ offset = nand_subop_get_data_start_off(subop, op_id);
+ op->orig_len = nand_subop_get_data_len(subop, op_id);
+ if (instr->type == NAND_OP_DATA_IN_INSTR)
+ op->buf = instr->ctx.data.buf.in + offset;
+ else if (instr->type == NAND_OP_DATA_OUT_INSTR)
+ op->buf = (void *)instr->ctx.data.buf.out + offset;
+
+ break;
+ case NAND_OP_WAITRDY_INSTR:
+ op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static void ls1b_nand_set_addr(struct ls1x_nand_host *host, struct ls1x_nand_op *op)
+{
+ struct nand_chip *chip = &host->chip;
+ int i;
+
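+ /* ADDR1 holds the column and low row bytes; the fifth cycle spills into ADDR2 */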
+ for (i = 0; i < LS1X_NAND_MAX_ADDR_CYC; i++) {
+ int shift, mask, val;
+
+ if (i < LS1X_NAND_COL_ADDR_CYC) {
+ shift = i * BITS_PER_BYTE;
+ mask = (u32)0xff << shift;
+ mask &= GENMASK(chip->page_shift, 0);
+ val = (u32)op->addrs[i] << shift;
+ regmap_update_bits(host->regmap, LS1X_NAND_ADDR1, mask, val);
+ } else if (!op->is_change_column) {
+ shift = op->row_start + (i - LS1X_NAND_COL_ADDR_CYC) * BITS_PER_BYTE;
+ mask = (u32)0xff << shift;
+ val = (u32)op->addrs[i] << shift;
+ regmap_update_bits(host->regmap, LS1X_NAND_ADDR1, mask, val);
+
+ if (i == 4) {
+ mask = (u32)0xff >> (BITS_PER_WORD - shift);
+ val = (u32)op->addrs[i] >> (BITS_PER_WORD - shift);
+ regmap_update_bits(host->regmap, LS1X_NAND_ADDR2, mask, val);
+ }
+ }
+ }
+}
+
+static void ls1c_nand_set_addr(struct ls1x_nand_host *host, struct ls1x_nand_op *op)
+{
+ int i;
+
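+ /* The ls1c splits cycles cleanly: column bytes in ADDR1, row bytes in ADDR2 */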
+ for (i = 0; i < LS1X_NAND_MAX_ADDR_CYC; i++) {
+ int shift, mask, val;
+
+ if (i < LS1X_NAND_COL_ADDR_CYC) {
+ shift = i * BITS_PER_BYTE;
+ mask = (u32)0xff << shift;
+ val = (u32)op->addrs[i] << shift;
+ regmap_update_bits(host->regmap, LS1X_NAND_ADDR1, mask, val);
+ } else if (!op->is_change_column) {
+ shift = (i - LS1X_NAND_COL_ADDR_CYC) * BITS_PER_BYTE;
+ mask = (u32)0xff << shift;
+ val = (u32)op->addrs[i] << shift;
+ regmap_update_bits(host->regmap, LS1X_NAND_ADDR2, mask, val);
+ }
+ }
+}
+
+static void ls1x_nand_trigger_op(struct ls1x_nand_host *host, struct ls1x_nand_op *op)
+{
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int col0 = op->addrs[0];
+ short col;
+
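+ /* Align the column down to buf_align; the remainder is skipped when copying out */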
+ if (!IS_ALIGNED(col0, chip->buf_align)) {
+ col0 = ALIGN_DOWN(op->addrs[0], chip->buf_align);
+ op->aligned_offset = op->addrs[0] - col0;
+ op->addrs[0] = col0;
+ }
+
+ if (host->data->set_addr)
+ host->data->set_addr(host, op);
+
+ /* set operation length */
+ if (op->is_write || op->is_read || op->is_change_column)
+ op->len = ALIGN(op->orig_len + op->aligned_offset, chip->buf_align);
+ else if (op->is_erase)
+ op->len = 1;
+ else
+ op->len = op->orig_len;
+
+ writel(op->len, host->reg_base + LS1X_NAND_OP_NUM);
+
+ /* set operation area and scope */
+ col = op->addrs[1] << BITS_PER_BYTE | op->addrs[0];
+ if (op->orig_len && !op->is_readid) {
+ unsigned int op_scope = 0;
+
+ if (col < mtd->writesize) {
+ op->cmd_reg |= LS1X_NAND_CMD_OP_MAIN;
+ op_scope = mtd->writesize;
+ }
+
+ op->cmd_reg |= LS1X_NAND_CMD_OP_SPARE;
+ op_scope += mtd->oobsize;
+
+ op_scope <<= __ffs(host->data->op_scope_field);
+ regmap_update_bits(host->regmap, LS1X_NAND_PARAM,
+ host->data->op_scope_field, op_scope);
+ }
+
+ /* set command */
+ writel(op->cmd_reg, host->reg_base + LS1X_NAND_CMD);
+
+ /* trigger operation */
+ regmap_write_bits(host->regmap, LS1X_NAND_CMD, LS1X_NAND_CMD_VALID, LS1X_NAND_CMD_VALID);
+}
+
+static int ls1x_nand_wait_for_op_done(struct ls1x_nand_host *host, struct ls1x_nand_op *op)
+{
+ unsigned int val;
+ int ret = 0;
+
+ if (op->rdy_timeout_ms) {
+ ret = regmap_read_poll_timeout(host->regmap, LS1X_NAND_CMD,
+ val, val & LS1X_NAND_CMD_OP_DONE,
+ 0, op->rdy_timeout_ms * MSEC_PER_SEC);
+ if (ret)
+ dev_err(host->dev, "operation failed\n");
+ }
+
+ return ret;
+}
+
+static void ls1x_nand_dma_callback(void *data)
+{
+ struct ls1x_nand_host *host = (struct ls1x_nand_host *)data;
+ struct dma_chan *chan = host->dma_chan;
+ struct device *dev = chan->device->dev;
+ enum dma_status status;
+
+ status = dmaengine_tx_status(chan, host->dma_cookie, NULL);
+ if (likely(status == DMA_COMPLETE)) {
+ dev_dbg(dev, "DMA complete with cookie=%d\n", host->dma_cookie);
+ complete(&host->dma_complete);
+ } else {
+ dev_err(dev, "DMA error with cookie=%d\n", host->dma_cookie);
+ }
+}
+
+static int ls1x_nand_dma_transfer(struct ls1x_nand_host *host, struct ls1x_nand_op *op)
+{
+ struct nand_chip *chip = &host->chip;
+ struct dma_chan *chan = host->dma_chan;
+ struct device *dev = chan->device->dev;
+ struct dma_async_tx_descriptor *desc;
+ enum dma_data_direction data_dir = op->is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ enum dma_transfer_direction xfer_dir = op->is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
+ void *buf = op->buf;
+ char *dma_buf = NULL;
+ dma_addr_t dma_addr;
+ int ret;
+
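+ /* Map aligned buffers directly; unaligned reads bounce through a coherent buffer */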
+ if (IS_ALIGNED((uintptr_t)buf, chip->buf_align) &&
+ IS_ALIGNED(op->orig_len, chip->buf_align)) {
+ dma_addr = dma_map_single(dev, buf, op->orig_len, data_dir);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err(dev, "failed to map DMA buffer\n");
+ return -ENXIO;
+ }
+ } else if (!op->is_write) {
+ dma_buf = dma_alloc_coherent(dev, op->len, &dma_addr, GFP_KERNEL);
+ if (!dma_buf)
+ return -ENOMEM;
+ } else {
+ dev_err(dev, "subpage writing not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ desc = dmaengine_prep_slave_single(chan, dma_addr, op->len, xfer_dir, DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(dev, "failed to prepare DMA descriptor\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ desc->callback = ls1x_nand_dma_callback;
+ desc->callback_param = host;
+
+ host->dma_cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(host->dma_cookie);
+ if (ret) {
+ dev_err(dev, "failed to submit DMA descriptor\n");
+ goto err;
+ }
+
+ dev_dbg(dev, "issue DMA with cookie=%d\n", host->dma_cookie);
+ dma_async_issue_pending(chan);
+
+ if (!wait_for_completion_timeout(&host->dma_complete, msecs_to_jiffies(1000))) {
+ dmaengine_terminate_sync(chan);
+ reinit_completion(&host->dma_complete);
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ if (dma_buf)
+ memcpy(buf, dma_buf + op->aligned_offset, op->orig_len);
+err:
+ if (dma_buf)
+ dma_free_coherent(dev, op->len, dma_buf, dma_addr);
+ else
+ dma_unmap_single(dev, dma_addr, op->orig_len, data_dir);
+
+ return ret;
+}
+
+static int ls1x_nand_data_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
+{
+ struct ls1x_nand_host *host = nand_get_controller_data(chip);
+ struct ls1x_nand_op op = {};
+ int ret;
+
+ ret = ls1x_nand_parse_instructions(chip, subop, &op);
+ if (ret)
+ return ret;
+
+ ls1x_nand_trigger_op(host, &op);
+
+ ret = ls1x_nand_dma_transfer(host, &op);
+ if (ret)
+ return ret;
+
+ return ls1x_nand_wait_for_op_done(host, &op);
+}
+
+static int ls1x_nand_misc_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop, struct ls1x_nand_op *op)
+{
+ struct ls1x_nand_host *host = nand_get_controller_data(chip);
+ int ret;
+
+ ret = ls1x_nand_parse_instructions(chip, subop, op);
+ if (ret)
+ return ret;
+
+ ls1x_nand_trigger_op(host, op);
+
+ return ls1x_nand_wait_for_op_done(host, op);
+}
+
+static int ls1x_nand_zerolen_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
+{
+ struct ls1x_nand_op op = {};
+
+ return ls1x_nand_misc_type_exec(chip, subop, &op);
+}
+
+static int ls1x_nand_read_id_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
+{
+ struct ls1x_nand_host *host = nand_get_controller_data(chip);
+ struct ls1x_nand_op op = {};
+ int i, ret;
+ union {
+ char ids[5];
+ struct {
+ int idl;
+ char idh;
+ };
+ } nand_id;
+
+ ret = ls1x_nand_misc_type_exec(chip, subop, &op);
+ if (ret)
+ return ret;
+
+ nand_id.idl = readl(host->reg_base + LS1X_NAND_IDL);
+ nand_id.idh = readb(host->reg_base + LS1X_NAND_IDH_STATUS);
+
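+ /* Copy the ID out most-significant byte first (IDH holds the leading byte) */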
+ for (i = 0; i < min(sizeof(nand_id.ids), op.orig_len); i++)
+ op.buf[i] = nand_id.ids[sizeof(nand_id.ids) - 1 - i];
+
+ return ret;
+}
+
+static int ls1x_nand_read_status_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
+{
+ struct ls1x_nand_host *host = nand_get_controller_data(chip);
+ struct ls1x_nand_op op = {};
+ int val, ret;
+
+ ret = ls1x_nand_misc_type_exec(chip, subop, &op);
+ if (ret)
+ return ret;
+
+ val = readl(host->reg_base + LS1X_NAND_IDH_STATUS);
+ val &= ~host->data->status_field;
+ op.buf[0] = val << ffs(host->data->status_field);
+
+ return ret;
+}
+
+static const struct nand_op_parser ls1x_nand_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(
+ ls1x_nand_read_id_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)),
+ NAND_OP_PARSER_PATTERN(
+ ls1x_nand_read_status_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
+ NAND_OP_PARSER_PATTERN(
+ ls1x_nand_zerolen_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ ls1x_nand_zerolen_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ ls1x_nand_data_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 0)),
+ NAND_OP_PARSER_PATTERN(
+ ls1x_nand_data_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 0),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+ );
+
+static int ls1x_nand_is_valid_cmd(u8 opcode)
+{
+ if (opcode == NAND_CMD_STATUS || opcode == NAND_CMD_RESET || opcode == NAND_CMD_READID)
+ return 0;
+
+ return -EOPNOTSUPP;
+}
+
+static int ls1x_nand_is_valid_cmd_seq(u8 opcode1, u8 opcode2)
+{
+ if (opcode1 == NAND_CMD_RNDOUT && opcode2 == NAND_CMD_RNDOUTSTART)
+ return 0;
+
+ if (opcode1 == NAND_CMD_READ0 && opcode2 == NAND_CMD_READSTART)
+ return 0;
+
+ if (opcode1 == NAND_CMD_ERASE1 && opcode2 == NAND_CMD_ERASE2)
+ return 0;
+
+ if (opcode1 == NAND_CMD_SEQIN && opcode2 == NAND_CMD_PAGEPROG)
+ return 0;
+
+ return -EOPNOTSUPP;
+}
+
+static int ls1x_nand_check_op(struct nand_chip *chip, const struct nand_operation *op)
+{
+ const struct nand_op_instr *instr1 = NULL, *instr2 = NULL;
+ int op_id;
+
+ for (op_id = 0; op_id < op->ninstrs; op_id++) {
+ const struct nand_op_instr *instr = &op->instrs[op_id];
+
+ if (instr->type == NAND_OP_CMD_INSTR) {
+ if (!instr1)
+ instr1 = instr;
+ else if (!instr2)
+ instr2 = instr;
+ else
+ break;
+ }
+ }
+
+ if (!instr1)
+ return -EOPNOTSUPP;
+
+ if (!instr2)
+ return ls1x_nand_is_valid_cmd(instr1->ctx.cmd.opcode);
+
+ return ls1x_nand_is_valid_cmd_seq(instr1->ctx.cmd.opcode, instr2->ctx.cmd.opcode);
+}
+
+static int ls1x_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op, bool check_only)
+{
+ if (check_only)
+ return ls1x_nand_check_op(chip, op);
+
+ return nand_op_parser_exec_op(chip, &ls1x_nand_op_parser, op, check_only);
+}
+
+static int ls1x_nand_attach_chip(struct nand_chip *chip)
+{
+ struct ls1x_nand_host *host = nand_get_controller_data(chip);
+ u64 chipsize = nanddev_target_size(&chip->base);
+ int cell_size = 0;
+
+ switch (chipsize) {
+ case SZ_128M:
+ cell_size = 0x0;
+ break;
+ case SZ_256M:
+ cell_size = 0x1;
+ break;
+ case SZ_512M:
+ cell_size = 0x2;
+ break;
+ case SZ_1G:
+ cell_size = 0x3;
+ break;
+ case SZ_2G:
+ cell_size = 0x4;
+ break;
+ case SZ_4G:
+ cell_size = 0x5;
+ break;
+ case SZ_8G:
+ cell_size = 0x6;
+ break;
+ case SZ_16G:
+ cell_size = 0x7;
+ break;
+ default:
+ dev_err(host->dev, "unsupported chip size: %llu MB\n", chipsize >> 20);
+ return -EINVAL;
+ }
+
+ switch (chip->ecc.engine_type) {
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ break;
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set cell size */
+ regmap_update_bits(host->regmap, LS1X_NAND_PARAM, LS1X_NAND_CELL_SIZE_MASK,
+ FIELD_PREP(LS1X_NAND_CELL_SIZE_MASK, cell_size));
+
+ regmap_update_bits(host->regmap, LS1X_NAND_TIMING, LS1X_NAND_HOLD_CYCLE_MASK,
+ FIELD_PREP(LS1X_NAND_HOLD_CYCLE_MASK, host->data->hold_cycle));
+
+ regmap_update_bits(host->regmap, LS1X_NAND_TIMING, LS1X_NAND_WAIT_CYCLE_MASK,
+ FIELD_PREP(LS1X_NAND_WAIT_CYCLE_MASK, host->data->wait_cycle));
+
+ chip->ecc.read_page_raw = nand_monolithic_read_page_raw;
+ chip->ecc.write_page_raw = nand_monolithic_write_page_raw;
+
+ return 0;
+}
+
+static const struct nand_controller_ops ls1x_nand_controller_ops = {
+ .exec_op = ls1x_nand_exec_op,
+ .attach_chip = ls1x_nand_attach_chip,
+};
+
+static void ls1x_nand_controller_cleanup(struct ls1x_nand_host *host)
+{
+ if (host->dma_chan)
+ dma_release_channel(host->dma_chan);
+}
+
+static int ls1x_nand_controller_init(struct ls1x_nand_host *host)
+{
+ struct device *dev = host->dev;
+ struct dma_chan *chan;
+ struct dma_slave_config cfg = {};
+ int ret;
+
+ host->regmap = devm_regmap_init_mmio(dev, host->reg_base, &ls1x_nand_regmap_config);
+ if (IS_ERR(host->regmap))
+ return dev_err_probe(dev, PTR_ERR(host->regmap), "failed to init regmap\n");
+
+ chan = dma_request_chan(dev, "rxtx");
+ if (IS_ERR(chan))
+ return dev_err_probe(dev, PTR_ERR(chan), "failed to request DMA channel\n");
+ host->dma_chan = chan;
+
+ cfg.src_addr = host->dma_base;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr = host->dma_base;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ ret = dmaengine_slave_config(host->dma_chan, &cfg);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to config DMA channel\n");
+
+ init_completion(&host->dma_complete);
+
+ return 0;
+}
+
+static int ls1x_nand_chip_init(struct ls1x_nand_host *host)
+{
+ struct device *dev = host->dev;
+ int nchips = of_get_child_count(dev->of_node);
+ struct device_node *chip_np;
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ if (nchips != 1)
+ return dev_err_probe(dev, -EINVAL, "Currently only one NAND chip is supported\n");
+
+ chip_np = of_get_next_child(dev->of_node, NULL);
+ if (!chip_np)
+ return dev_err_probe(dev, -ENODEV, "failed to get child node for NAND chip\n");
+
+ nand_set_flash_node(chip, chip_np);
+ of_node_put(chip_np);
+ if (!mtd->name)
+ return dev_err_probe(dev, -EINVAL, "Missing MTD label\n");
+
+ nand_set_controller_data(chip, host);
+ chip->controller = &host->controller;
+ chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA | NAND_BROKEN_XD;
+ chip->buf_align = 16;
+ mtd->dev.parent = dev;
+ mtd->owner = THIS_MODULE;
+
+ ret = nand_scan(chip, 1);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to scan NAND chip\n");
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ nand_cleanup(chip);
+ return dev_err_probe(dev, ret, "failed to register MTD device\n");
+ }
+
+ return 0;
+}
+
+static int ls1x_nand_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct ls1x_nand_data *data;
+ struct ls1x_nand_host *host;
+ struct resource *res;
+ int ret;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -ENODEV;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ host->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(host->reg_base))
+ return PTR_ERR(host->reg_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-dma");
+ if (!res)
+ return dev_err_probe(dev, -EINVAL, "Missing 'nand-dma' in reg-names property\n");
+
+ host->dma_base = dma_map_resource(dev, res->start, resource_size(res),
+ DMA_BIDIRECTIONAL, 0);
+ if (dma_mapping_error(dev, host->dma_base))
+ return -ENXIO;
+
+ host->dev = dev;
+ host->data = data;
+ host->controller.ops = &ls1x_nand_controller_ops;
+
+ nand_controller_init(&host->controller);
+
+ ret = ls1x_nand_controller_init(host);
+ if (ret)
+ goto err;
+
+ ret = ls1x_nand_chip_init(host);
+ if (ret)
+ goto err;
+
+ platform_set_drvdata(pdev, host);
+
+ return 0;
+err:
+ ls1x_nand_controller_cleanup(host);
+
+ return ret;
+}
+
+static void ls1x_nand_remove(struct platform_device *pdev)
+{
+ struct ls1x_nand_host *host = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &host->chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ ls1x_nand_controller_cleanup(host);
+}
+
+static const struct ls1x_nand_data ls1b_nand_data = {
+ .status_field = GENMASK(15, 8),
+ .hold_cycle = 0x2,
+ .wait_cycle = 0xc,
+ .set_addr = ls1b_nand_set_addr,
+};
+
+static const struct ls1x_nand_data ls1c_nand_data = {
+ .status_field = GENMASK(23, 16),
+ .op_scope_field = GENMASK(29, 16),
+ .hold_cycle = 0x2,
+ .wait_cycle = 0xc,
+ .set_addr = ls1c_nand_set_addr,
+};
+
+static const struct of_device_id ls1x_nand_match[] = {
+ {
+ .compatible = "loongson,ls1b-nand-controller",
+ .data = &ls1b_nand_data,
+ },
+ {
+ .compatible = "loongson,ls1c-nand-controller",
+ .data = &ls1c_nand_data,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ls1x_nand_match);
+
+static struct platform_driver ls1x_nand_driver = {
+ .probe = ls1x_nand_probe,
+ .remove = ls1x_nand_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = ls1x_nand_match,
+ },
+};
+
+module_platform_driver(ls1x_nand_driver);
+
+MODULE_AUTHOR("Keguang Zhang <keguang.zhang@gmail.com>");
+MODULE_DESCRIPTION("Loongson-1 NAND Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 5eaa0be367cd..1003cf118c01 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -1863,7 +1863,12 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_
const struct nand_op_instr *instr = NULL;
unsigned int op_id = 0;
unsigned int len = 0;
- int ret;
+ int ret, reg_base;
+
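+ /* QPIC v2 reads the parameter page through the last-codeword read locations */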
+ reg_base = NAND_READ_LOCATION_0;
+
+ if (nandc->props->qpic_version2)
+ reg_base = NAND_READ_LOCATION_LAST_CW_0;
ret = qcom_parse_instructions(chip, subop, &q_op);
if (ret)
@@ -1915,14 +1920,17 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_
op_id = q_op.data_instr_idx;
len = nand_subop_get_data_len(subop, op_id);
- nandc_set_read_loc(chip, 0, 0, 0, len, 1);
+ if (nandc->props->qpic_version2)
+ nandc_set_read_loc_last(chip, reg_base, 0, len, 1);
+ else
+ nandc_set_read_loc_first(chip, reg_base, 0, len, 1);
if (!nandc->props->qpic_version2) {
qcom_write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
qcom_write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
}
- nandc->buf_count = len;
+ nandc->buf_count = 512;
memset(nandc->data_buffer, 0xff, nandc->buf_count);
config_nand_single_cw_page_read(chip, false, 0);
@@ -2360,6 +2368,7 @@ static const struct qcom_nandc_props ipq806x_nandc_props = {
.supports_bam = false,
.use_codeword_fixup = true,
.dev_cmd_reg_start = 0x0,
+ .bam_offset = 0x30000,
};
static const struct qcom_nandc_props ipq4019_nandc_props = {
@@ -2367,6 +2376,7 @@ static const struct qcom_nandc_props ipq4019_nandc_props = {
.supports_bam = true,
.nandc_part_of_qpic = true,
.dev_cmd_reg_start = 0x0,
+ .bam_offset = 0x30000,
};
static const struct qcom_nandc_props ipq8074_nandc_props = {
@@ -2374,6 +2384,7 @@ static const struct qcom_nandc_props ipq8074_nandc_props = {
.supports_bam = true,
.nandc_part_of_qpic = true,
.dev_cmd_reg_start = 0x7000,
+ .bam_offset = 0x30000,
};
static const struct qcom_nandc_props sdx55_nandc_props = {
@@ -2382,6 +2393,7 @@ static const struct qcom_nandc_props sdx55_nandc_props = {
.nandc_part_of_qpic = true,
.qpic_version2 = true,
.dev_cmd_reg_start = 0x7000,
+ .bam_offset = 0x30000,
};
/*
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index fab371e3e9b7..162cd5f4f234 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -817,6 +817,7 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct nand_chip *nand,
if (ret)
return ret;
+ sunxi_nfc_randomizer_config(nand, page, false);
sunxi_nfc_randomizer_enable(nand);
writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP,
nfc->regs + NFC_REG_CMD);
@@ -1049,6 +1050,7 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct nand_chip *nand,
if (ret)
return ret;
+ sunxi_nfc_randomizer_config(nand, page, false);
sunxi_nfc_randomizer_enable(nand);
sunxi_nfc_hw_ecc_set_prot_oob_bytes(nand, oob, 0, bbm, page);
diff --git a/drivers/mtd/nand/spi/alliancememory.c b/drivers/mtd/nand/spi/alliancememory.c
index 6046c73f8424..2ee498230ec1 100644
--- a/drivers/mtd/nand/spi/alliancememory.c
+++ b/drivers/mtd/nand/spi/alliancememory.c
@@ -17,20 +17,20 @@
#define AM_STATUS_ECC_MAX_CORRECTED (3 << 4)
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int am_get_eccsize(struct mtd_info *mtd)
{
diff --git a/drivers/mtd/nand/spi/ato.c b/drivers/mtd/nand/spi/ato.c
index bb5298911137..2b4df1d917ac 100644
--- a/drivers/mtd/nand/spi/ato.c
+++ b/drivers/mtd/nand/spi/ato.c
@@ -14,17 +14,17 @@
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int ato25d1ga_ooblayout_ecc(struct mtd_info *mtd, int section,
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index d16e42cf8fae..7099db7a62be 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -22,7 +22,7 @@
static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
- struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
+ struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(reg,
spinand->scratchbuf);
int ret;
@@ -36,7 +36,7 @@ static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
- struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
+ struct spi_mem_op op = SPINAND_SET_FEATURE_1S_1S_1S_OP(reg,
spinand->scratchbuf);
*spinand->scratchbuf = val;
@@ -362,7 +362,7 @@ static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status)
static int spinand_write_enable_op(struct spinand_device *spinand)
{
- struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);
+ struct spi_mem_op op = SPINAND_WR_EN_DIS_1S_0_0_OP(true);
return spi_mem_exec_op(spinand->spimem, &op);
}
@@ -372,7 +372,7 @@ static int spinand_load_page_op(struct spinand_device *spinand,
{
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int row = nanddev_pos_to_row(nand, &req->pos);
- struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);
+ struct spi_mem_op op = SPINAND_PAGE_READ_1S_1S_0_OP(row);
return spi_mem_exec_op(spinand->spimem, &op);
}
@@ -519,7 +519,7 @@ static int spinand_program_op(struct spinand_device *spinand,
{
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int row = nanddev_pos_to_row(nand, &req->pos);
- struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);
+ struct spi_mem_op op = SPINAND_PROG_EXEC_1S_1S_0_OP(row);
return spi_mem_exec_op(spinand->spimem, &op);
}
@@ -529,7 +529,7 @@ static int spinand_erase_op(struct spinand_device *spinand,
{
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int row = nanddev_pos_to_row(nand, pos);
- struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);
+ struct spi_mem_op op = SPINAND_BLK_ERASE_1S_1S_0_OP(row);
return spi_mem_exec_op(spinand->spimem, &op);
}
@@ -549,8 +549,8 @@ static int spinand_erase_op(struct spinand_device *spinand,
int spinand_wait(struct spinand_device *spinand, unsigned long initial_delay_us,
unsigned long poll_delay_us, u8 *s)
{
- struct spi_mem_op op = SPINAND_GET_FEATURE_OP(REG_STATUS,
- spinand->scratchbuf);
+ struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(REG_STATUS,
+ spinand->scratchbuf);
u8 status;
int ret;
@@ -583,7 +583,7 @@ out:
static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
u8 ndummy, u8 *buf)
{
- struct spi_mem_op op = SPINAND_READID_OP(
+ struct spi_mem_op op = SPINAND_READID_1S_1S_1S_OP(
naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
int ret;
@@ -596,7 +596,7 @@ static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
static int spinand_reset_op(struct spinand_device *spinand)
{
- struct spi_mem_op op = SPINAND_RESET_OP;
+ struct spi_mem_op op = SPINAND_RESET_1S_0_0_OP;
int ret;
ret = spi_mem_exec_op(spinand->spimem, &op);
diff --git a/drivers/mtd/nand/spi/esmt.c b/drivers/mtd/nand/spi/esmt.c
index a164d821464d..9e286612a296 100644
--- a/drivers/mtd/nand/spi/esmt.c
+++ b/drivers/mtd/nand/spi/esmt.c
@@ -18,18 +18,18 @@
(CFG_OTP_ENABLE | ESMT_F50L1G41LB_CFG_OTP_PROTECT)
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
/*
* OOB spare area map (64 bytes)
@@ -137,8 +137,8 @@ static int f50l1g41lb_user_otp_info(struct spinand_device *spinand, size_t len,
static int f50l1g41lb_otp_lock(struct spinand_device *spinand, loff_t from,
size_t len)
{
- struct spi_mem_op write_op = SPINAND_WR_EN_DIS_OP(true);
- struct spi_mem_op exec_op = SPINAND_PROG_EXEC_OP(0);
+ struct spi_mem_op write_op = SPINAND_WR_EN_DIS_1S_0_0_OP(true);
+ struct spi_mem_op exec_op = SPINAND_PROG_EXEC_1S_1S_0_OP(0);
u8 status;
int ret;
@@ -199,7 +199,7 @@ static const struct spinand_info esmt_c8_spinand_table[] = {
SPINAND_FACT_OTP_INFO(2, 0, &f50l1g41lb_fact_otp_ops)),
SPINAND_INFO("F50D1G41LB",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x11, 0x7f,
- 0x7f, 0x7f),
+ 0x7f),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(1, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
diff --git a/drivers/mtd/nand/spi/foresee.c b/drivers/mtd/nand/spi/foresee.c
index ecd5f6bffa33..7c61644bfb10 100644
--- a/drivers/mtd/nand/spi/foresee.c
+++ b/drivers/mtd/nand/spi/foresee.c
@@ -12,18 +12,18 @@
#define SPINAND_MFR_FORESEE 0xCD
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int f35sqa002g_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
index d620bb02a20a..cb1d316fc4d8 100644
--- a/drivers/mtd/nand/spi/gigadevice.c
+++ b/drivers/mtd/nand/spi/gigadevice.c
@@ -24,44 +24,44 @@
#define GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR (7 << 4)
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(read_cache_variants_f,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP_3A(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP_3A(0, 0, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_3A_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_1S_OP(0, 0, NULL, 0));
static SPINAND_OP_VARIANTS(read_cache_variants_1gq5,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(read_cache_variants_2gq5,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 4, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 4, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int gd5fxgq4xa_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
@@ -185,7 +185,7 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
u8 status2;
- struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
+ struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(GD5FXGQXXEXXG_REG_STATUS2,
spinand->scratchbuf);
int ret;
@@ -228,7 +228,7 @@ static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
u8 status2;
- struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
+ struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(GD5FXGQXXEXXG_REG_STATUS2,
spinand->scratchbuf);
int ret;
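Ordering inside each SPINAND_OP_VARIANTS() list is significant: the core keeps the first variant the SPI controller actually supports, so the wider and faster ops come first and the plain 1S_1S_1S read is the fallback. A simplified sketch of that selection, modeled on spinand_select_op_variant() (the real helper also adapts the data length to the page size):

static const struct spi_mem_op *
select_op_variant_sketch(struct spinand_device *spinand,
			 const struct spinand_op_variants *variants)
{
	unsigned int i;

	for (i = 0; i < variants->nops; i++) {
		struct spi_mem_op op = variants->ops[i];

		/* first op the controller accepts wins */
		if (spi_mem_supports_op(spinand->spimem, &op))
			return &variants->ops[i];
	}

	return NULL;	/* no listed variant is usable */
}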
diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c
index 1ef08ad850a2..eeaf5bf9f082 100644
--- a/drivers/mtd/nand/spi/macronix.c
+++ b/drivers/mtd/nand/spi/macronix.c
@@ -28,18 +28,18 @@ struct macronix_priv {
};
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int mx35lfxge4ab_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
@@ -148,8 +148,8 @@ static int macronix_set_cont_read(struct spinand_device *spinand, bool enable)
static int macronix_set_read_retry(struct spinand_device *spinand,
unsigned int retry_mode)
{
- struct spi_mem_op op = SPINAND_SET_FEATURE_OP(MACRONIX_FEATURE_ADDR_READ_RETRY,
- spinand->scratchbuf);
+ struct spi_mem_op op = SPINAND_SET_FEATURE_1S_1S_1S_OP(MACRONIX_FEATURE_ADDR_READ_RETRY,
+ spinand->scratchbuf);
*spinand->scratchbuf = retry_mode;
return spi_mem_exec_op(spinand->spimem, &op);
diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c
index 691f8a2e0791..8281c9d3f4f7 100644
--- a/drivers/mtd/nand/spi/micron.c
+++ b/drivers/mtd/nand/spi/micron.c
@@ -35,33 +35,33 @@
(CFG_OTP_ENABLE | MICRON_MT29F2G01ABAGD_CFG_OTP_STATE)
static SPINAND_OP_VARIANTS(quadio_read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(x4_write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(x4_update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
/* Micron MT29F2G01AAAED Device */
static SPINAND_OP_VARIANTS(x4_read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(x1_write_cache_variants,
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(x1_update_cache_variants,
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int micron_8_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
@@ -137,7 +137,7 @@ static const struct mtd_ooblayout_ops micron_4_ooblayout = {
static int micron_select_target(struct spinand_device *spinand,
unsigned int target)
{
- struct spi_mem_op op = SPINAND_SET_FEATURE_OP(MICRON_DIE_SELECT_REG,
+ struct spi_mem_op op = SPINAND_SET_FEATURE_1S_1S_1S_OP(MICRON_DIE_SELECT_REG,
spinand->scratchbuf);
if (target > 1)
@@ -251,8 +251,8 @@ static int mt29f2g01abagd_user_otp_info(struct spinand_device *spinand,
static int mt29f2g01abagd_otp_lock(struct spinand_device *spinand, loff_t from,
size_t len)
{
- struct spi_mem_op write_op = SPINAND_WR_EN_DIS_OP(true);
- struct spi_mem_op exec_op = SPINAND_PROG_EXEC_OP(0);
+ struct spi_mem_op write_op = SPINAND_WR_EN_DIS_1S_0_0_OP(true);
+ struct spi_mem_op exec_op = SPINAND_PROG_EXEC_1S_1S_0_OP(0);
u8 status;
int ret;
diff --git a/drivers/mtd/nand/spi/paragon.c b/drivers/mtd/nand/spi/paragon.c
index 6e7cc6995380..4670bac41245 100644
--- a/drivers/mtd/nand/spi/paragon.c
+++ b/drivers/mtd/nand/spi/paragon.c
@@ -22,20 +22,20 @@
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int pn26g0xa_ooblayout_ecc(struct mtd_info *mtd, int section,
diff --git a/drivers/mtd/nand/spi/skyhigh.c b/drivers/mtd/nand/spi/skyhigh.c
index 961df0d74984..51d61785df61 100644
--- a/drivers/mtd/nand/spi/skyhigh.c
+++ b/drivers/mtd/nand/spi/skyhigh.c
@@ -17,20 +17,20 @@
#define SKYHIGH_CONFIG_PROTECT_EN BIT(1)
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 4, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 4, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int skyhigh_spinand_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c
index 2e2106b2705f..4c6923047aeb 100644
--- a/drivers/mtd/nand/spi/toshiba.c
+++ b/drivers/mtd/nand/spi/toshiba.c
@@ -15,28 +15,28 @@
#define TOSH_STATUS_ECC_HAS_BITFLIPS_T (3 << 4)
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_x4_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_x4_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
/*
* Backward compatibility for 1st generation Serial NAND devices
* which don't support Quad Program Load operation.
*/
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int tx58cxgxsxraix_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
@@ -73,7 +73,7 @@ static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand,
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 mbf = 0;
- struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, spinand->scratchbuf);
+ struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(0x30, spinand->scratchbuf);
switch (status & STATUS_ECC_MASK) {
case STATUS_ECC_NO_BITFLIPS:
diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c
index 8394a1b1fb0c..19f8dd4a6370 100644
--- a/drivers/mtd/nand/spi/winbond.c
+++ b/drivers/mtd/nand/spi/winbond.c
@@ -23,34 +23,50 @@
* "X4" in the core is equivalent to "quad output" in the datasheets.
*/
-static SPINAND_OP_VARIANTS(read_cache_dtr_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_DTR_OP(0, 8, NULL, 0, 80 * HZ_PER_MHZ),
- SPINAND_PAGE_READ_FROM_CACHE_X4_DTR_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_DTR_OP(0, 4, NULL, 0, 80 * HZ_PER_MHZ),
- SPINAND_PAGE_READ_FROM_CACHE_X2_DTR_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DTR_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0, 54 * HZ_PER_MHZ));
+static SPINAND_OP_VARIANTS(read_cache_octal_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1D_8D_OP(0, 2, NULL, 0, 105 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_8S_8S_OP(0, 16, NULL, 0, 86 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_8S_OP(0, 1, NULL, 0, 133 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_octal_variants,
+ SPINAND_PROG_LOAD_1S_8S_8S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_8S_OP(0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_octal_variants,
+ SPINAND_PROG_LOAD_1S_8S_8S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(read_cache_dual_quad_dtr_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4D_4D_OP(0, 8, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1D_4D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2D_2D_OP(0, 4, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1D_2D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1D_1D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 54 * HZ_PER_MHZ));
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int w25m02gv_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
@@ -141,12 +157,47 @@ static const struct mtd_ooblayout_ops w25n02kv_ooblayout = {
.free = w25n02kv_ooblayout_free,
};
+static int w35n01jw_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 7)
+ return -ERANGE;
+
+ region->offset = (16 * section) + 12;
+ region->length = 4;
+
+ return 0;
+}
+
+static int w35n01jw_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 7)
+ return -ERANGE;
+
+ region->offset = 16 * section;
+ region->length = 12;
+
+ /* Extract BBM */
+ if (!section) {
+ region->offset += 2;
+ region->length -= 2;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops w35n01jw_ooblayout = {
+ .ecc = w35n01jw_ooblayout_ecc,
+ .free = w35n01jw_ooblayout_free,
+};
+
static int w25n02kv_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 mbf = 0;
- struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, spinand->scratchbuf);
+ struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(0x30, spinand->scratchbuf);
switch (status & STATUS_ECC_MASK) {
case STATUS_ECC_NO_BITFLIPS:
@@ -213,7 +264,7 @@ static const struct spinand_info winbond_spinand_table[] = {
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xbc, 0x21),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(1, 512),
- SPINAND_INFO_OP_VARIANTS(&read_cache_dtr_variants,
+ SPINAND_INFO_OP_VARIANTS(&read_cache_dual_quad_dtr_variants,
&write_cache_variants,
&update_cache_variants),
0,
@@ -227,6 +278,33 @@ static const struct spinand_info winbond_spinand_table[] = {
&update_cache_variants),
0,
SPINAND_ECCINFO(&w25n01kv_ooblayout, w25n02kv_ecc_get_status)),
+ SPINAND_INFO("W35N01JW", /* 1.8V */
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xdc, 0x21),
+ NAND_MEMORG(1, 4096, 128, 64, 512, 10, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_octal_variants,
+ &write_cache_octal_variants,
+ &update_cache_octal_variants),
+ 0,
+ SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL)),
+ SPINAND_INFO("W35N02JW", /* 1.8V */
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xdf, 0x22),
+ NAND_MEMORG(1, 4096, 128, 64, 512, 10, 2, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_octal_variants,
+ &write_cache_octal_variants,
+ &update_cache_octal_variants),
+ 0,
+ SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL)),
+ SPINAND_INFO("W35N04JW", /* 1.8V */
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xdf, 0x23),
+ NAND_MEMORG(1, 4096, 128, 64, 512, 10, 4, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_octal_variants,
+ &write_cache_octal_variants,
+ &update_cache_octal_variants),
+ 0,
+ SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL)),
/* 2G-bit densities */
SPINAND_INFO("W25M02GV", /* 2x1G-bit 3.3V */
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xab, 0x21),
@@ -242,7 +320,7 @@ static const struct spinand_info winbond_spinand_table[] = {
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xbf, 0x22),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 2, 1),
NAND_ECCREQ(1, 512),
- SPINAND_INFO_OP_VARIANTS(&read_cache_dtr_variants,
+ SPINAND_INFO_OP_VARIANTS(&read_cache_dual_quad_dtr_variants,
&write_cache_variants,
&update_cache_variants),
0,
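The new W35N01JW layout carves the 128-byte OOB area into eight 16-byte chunks, each with 12 free bytes followed by 4 ECC bytes, and reserves the first two free bytes of chunk 0 for the bad-block marker. A hedged sketch of how an MTD consumer walks such a layout through the standard accessors (the dump loop is illustrative, not driver code):

/* Enumerate ECC regions until the layout returns -ERANGE. */
static void dump_ecc_regions_sketch(struct mtd_info *mtd)
{
	struct mtd_oob_region region;
	int section = 0;

	while (!mtd_ooblayout_ecc(mtd, section, &region)) {
		pr_info("ecc[%d]: offset=%u length=%u\n",
			section, region.offset, region.length);
		section++;
	}
	/* For w35n01jw_ooblayout this prints 8 sections at offsets
	 * 12, 28, 44, ..., 124, each 4 bytes long. */
}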
diff --git a/drivers/mtd/nand/spi/xtx.c b/drivers/mtd/nand/spi/xtx.c
index 3f539ca0de86..37336d5958a9 100644
--- a/drivers/mtd/nand/spi/xtx.c
+++ b/drivers/mtd/nand/spi/xtx.c
@@ -23,20 +23,20 @@
#define XT26XXXD_STATUS_ECC_UNCOR_ERROR (2)
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int xt26g0xa_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c
index 55644a3cd88c..e97f5cbd9aad 100644
--- a/drivers/mtd/spi-nor/macronix.c
+++ b/drivers/mtd/spi-nor/macronix.c
@@ -58,6 +58,31 @@ macronix_qpp4b_post_sfdp_fixups(struct spi_nor *nor)
return 0;
}
+static int
+mx25l3255e_late_init_fixups(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+
+ /*
+ * The SFDP table of the MX25L3255E follows JESD216, which lacks the
+ * Quad Enable Requirements field in the BFPT. As a result, BFPT
+ * parsing does not set quad_enable to spi_nor_sr1_bit6_quad_enable,
+ * so the setting must be corrected in late_init.
+ */
+ params->quad_enable = spi_nor_sr1_bit6_quad_enable;
+
+ /*
+ * MX25L3255E also supports 1-4-4 page program in 3-byte address
+ * mode. Since this 3-byte address 1-4-4 page program is not defined
+ * in SFDP, it too must be configured in late_init.
+ */
+ params->hwcaps.mask |= SNOR_HWCAPS_PP_1_4_4;
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_1_4_4],
+ SPINOR_OP_PP_1_4_4, SNOR_PROTO_1_4_4);
+
+ return 0;
+}
+
static const struct spi_nor_fixups mx25l25635_fixups = {
.post_bfpt = mx25l25635_post_bfpt_fixups,
.post_sfdp = macronix_qpp4b_post_sfdp_fixups,
@@ -67,6 +92,10 @@ static const struct spi_nor_fixups macronix_qpp4b_fixups = {
.post_sfdp = macronix_qpp4b_post_sfdp_fixups,
};
+static const struct spi_nor_fixups mx25l3255e_fixups = {
+ .late_init = mx25l3255e_late_init_fixups,
+};
+
static const struct flash_info macronix_nor_parts[] = {
{
.id = SNOR_ID(0xc2, 0x20, 0x10),
@@ -88,10 +117,8 @@ static const struct flash_info macronix_nor_parts[] = {
.name = "mx25l8005",
.size = SZ_1M,
}, {
+ /* MX25L1606E */
.id = SNOR_ID(0xc2, 0x20, 0x15),
- .name = "mx25l1606e",
- .size = SZ_2M,
- .no_sfdp_flags = SECT_4K,
}, {
.id = SNOR_ID(0xc2, 0x20, 0x16),
.name = "mx25l3205d",
@@ -103,29 +130,21 @@ static const struct flash_info macronix_nor_parts[] = {
.size = SZ_8M,
.no_sfdp_flags = SECT_4K,
}, {
+ /* MX25L12805D */
.id = SNOR_ID(0xc2, 0x20, 0x18),
- .name = "mx25l12805d",
- .size = SZ_16M,
.flags = SPI_NOR_HAS_LOCK | SPI_NOR_4BIT_BP,
- .no_sfdp_flags = SECT_4K,
}, {
+ /* MX25L25635E, MX25L25645G */
.id = SNOR_ID(0xc2, 0x20, 0x19),
- .name = "mx25l25635e",
- .size = SZ_32M,
- .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
.fixups = &mx25l25635_fixups
}, {
+ /* MX66L51235F */
.id = SNOR_ID(0xc2, 0x20, 0x1a),
- .name = "mx66l51235f",
- .size = SZ_64M,
- .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
.fixup_flags = SPI_NOR_4B_OPCODES,
.fixups = &macronix_qpp4b_fixups,
}, {
+ /* MX66L1G45G */
.id = SNOR_ID(0xc2, 0x20, 0x1b),
- .name = "mx66l1g45g",
- .size = SZ_128M,
- .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
.fixups = &macronix_qpp4b_fixups,
}, {
/* MX66L2G45G */
@@ -167,29 +186,16 @@ static const struct flash_info macronix_nor_parts[] = {
.size = SZ_16M,
.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
}, {
+ /* MX25U51245G */
.id = SNOR_ID(0xc2, 0x25, 0x3a),
- .name = "mx25u51245g",
- .size = SZ_64M,
- .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
- .fixup_flags = SPI_NOR_4B_OPCODES,
- .fixups = &macronix_qpp4b_fixups,
- }, {
- .id = SNOR_ID(0xc2, 0x25, 0x3a),
- .name = "mx66u51235f",
- .size = SZ_64M,
- .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
- .fixup_flags = SPI_NOR_4B_OPCODES,
.fixups = &macronix_qpp4b_fixups,
}, {
/* MX66U1G45G */
.id = SNOR_ID(0xc2, 0x25, 0x3b),
.fixups = &macronix_qpp4b_fixups,
}, {
+ /* MX66U2G45G */
.id = SNOR_ID(0xc2, 0x25, 0x3c),
- .name = "mx66u2g45g",
- .size = SZ_256M,
- .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
- .fixup_flags = SPI_NOR_4B_OPCODES,
.fixups = &macronix_qpp4b_fixups,
}, {
.id = SNOR_ID(0xc2, 0x26, 0x18),
@@ -215,15 +221,14 @@ static const struct flash_info macronix_nor_parts[] = {
.size = SZ_4M,
.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
}, {
+ /* MX25UW51245G */
.id = SNOR_ID(0xc2, 0x81, 0x3a),
- .name = "mx25uw51245g",
.n_banks = 4,
.flags = SPI_NOR_RWW,
}, {
+ /* MX25L3255E */
.id = SNOR_ID(0xc2, 0x9e, 0x16),
- .name = "mx25l3255e",
- .size = SZ_4M,
- .no_sfdp_flags = SECT_4K,
+ .fixups = &mx25l3255e_fixups,
},
/*
* This spares us from adding new flash entries for flashes that can be
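For context on the quad_enable fixup above: spi_nor_sr1_bit6_quad_enable() implements the Macronix-style QE bit, bit 6 of Status Register 1. A rough sketch of the idea, assuming the core's internal status-register helpers (the real implementation in drivers/mtd/spi-nor/core.c also re-reads the register to verify the write):

/* Sketch: enable quad mode by setting SR1 bit 6 if it is clear. */
static int sr1_bit6_quad_enable_sketch(struct spi_nor *nor)
{
	int ret;

	ret = spi_nor_read_sr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	if (nor->bouncebuf[0] & BIT(6))
		return 0;	/* QE already set, nothing to do */

	nor->bouncebuf[0] |= BIT(6);
	return spi_nor_write_sr(nor, nor->bouncebuf, 1);
}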
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 636520bb4b8c..81a74e07464f 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -96,7 +96,6 @@ config BNX2
config CNIC
tristate "QLogic CNIC support"
depends on PCI && (IPV6 || IPV6=n)
- depends on MMU
select BNX2
select UIO
help
diff --git a/drivers/net/wireless/ath/ath11k/Kconfig b/drivers/net/wireless/ath/ath11k/Kconfig
index 2e935d381b6b..659ef134ef16 100644
--- a/drivers/net/wireless/ath/ath11k/Kconfig
+++ b/drivers/net/wireless/ath/ath11k/Kconfig
@@ -24,7 +24,7 @@ config ATH11K_PCI
select MHI_BUS
select QRTR
select QRTR_MHI
- select PCI_PWRCTL_PWRSEQ if HAVE_PWRCTL
+ select PCI_PWRCTRL_PWRSEQ if HAVE_PWRCTRL
help
This module adds support for PCIE bus
diff --git a/drivers/net/wireless/ath/ath12k/Kconfig b/drivers/net/wireless/ath/ath12k/Kconfig
index b3b15e1eb282..1ea1af1b8f6c 100644
--- a/drivers/net/wireless/ath/ath12k/Kconfig
+++ b/drivers/net/wireless/ath/ath12k/Kconfig
@@ -7,7 +7,7 @@ config ATH12K
select MHI_BUS
select QRTR
select QRTR_MHI
- select PCI_PWRCTL_PWRSEQ if HAVE_PWRCTL
+ select PCI_PWRCTRL_PWRSEQ if HAVE_PWRCTRL
help
Enable support for Qualcomm Technologies Wi-Fi 7 (IEEE
802.11be) family of chipsets, for example WCN7850 and
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/d3.c b/drivers/net/wireless/intel/iwlwifi/mld/d3.c
index 339b148d6793..c776543cbba5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/d3.c
@@ -1757,7 +1757,7 @@ iwl_mld_send_proto_offload(struct iwl_mld *mld,
addrconf_addr_solict_mult(&wowlan_data->target_ipv6_addrs[i],
&solicited_addr);
- for (j = 0; j < c; j++)
+ for (j = 0; j < n_nsc && j < c; j++)
if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr,
&solicited_addr) == 0)
break;
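The fix above is the usual clamp-to-capacity pattern: the loop index must stay below both the count of entries actually filled (c) and the size of the backing array (n_nsc), since an over-counted fill value would otherwise walk past nsc[]. A standalone illustration with hypothetical names:

#define TBL_CAP 4	/* hypothetical capacity, playing the role of n_nsc */

static int find_entry_sketch(const int *tbl, unsigned int filled, int key)
{
	unsigned int j;

	/* bound by capacity AND fill count, whichever is smaller */
	for (j = 0; j < TBL_CAP && j < filled; j++)
		if (tbl[j] == key)
			return j;

	return -1;	/* no match within the valid region */
}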
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index da28295b4aac..9c0e4aaf4e8c 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -21,6 +21,7 @@ config GENERIC_PCI_IOMAP
menuconfig PCI
bool "PCI support"
depends on HAVE_PCI
+ depends on MMU
help
This option enables support for the PCI local bus, including
support for PCI-X and the foundations for PCI Express support.
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index b6851101ac36..69048869ef1c 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -369,7 +369,9 @@ void pci_bus_add_device(struct pci_dev *dev)
pdev->name);
}
- dev->match_driver = !dn || of_device_is_available(dn);
+ if (!dn || of_device_is_available(dn))
+ pci_dev_allow_binding(dev);
+
retval = device_attach(&dev->dev);
if (retval < 0 && retval != -EPROBE_DEFER)
pci_warn(dev, "device attach failed (%d)\n", retval);
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index eb3cc28d43f8..886f6f43a895 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -3,6 +3,10 @@
menu "PCI controller drivers"
depends on PCI
+config PCI_HOST_COMMON
+ tristate
+ select PCI_ECAM
+
config PCI_AARDVARK
tristate "Aardvark PCIe controller"
depends on (ARCH_MVEBU && ARM64) || COMPILE_TEST
@@ -120,10 +124,6 @@ config PCI_FTPCI100
depends on OF
default ARCH_GEMINI
-config PCI_HOST_COMMON
- tristate
- select PCI_ECAM
-
config PCI_HOST_GENERIC
tristate "Generic PCI host controller"
depends on OF
diff --git a/drivers/pci/controller/cadence/Kconfig b/drivers/pci/controller/cadence/Kconfig
index 8a0044bb3989..666e16b6367f 100644
--- a/drivers/pci/controller/cadence/Kconfig
+++ b/drivers/pci/controller/cadence/Kconfig
@@ -4,16 +4,16 @@ menu "Cadence-based PCIe controllers"
depends on PCI
config PCIE_CADENCE
- bool
+ tristate
config PCIE_CADENCE_HOST
- bool
+ tristate
depends on OF
select IRQ_DOMAIN
select PCIE_CADENCE
config PCIE_CADENCE_EP
- bool
+ tristate
depends on OF
depends on PCI_ENDPOINT
select PCIE_CADENCE
@@ -43,13 +43,14 @@ config PCIE_CADENCE_PLAT_EP
different vendors SoCs.
config PCI_J721E
- bool
+ tristate
+ select PCIE_CADENCE_HOST if PCI_J721E_HOST != n
+ select PCIE_CADENCE_EP if PCI_J721E_EP != n
config PCI_J721E_HOST
- bool "TI J721E PCIe controller (host mode)"
+ tristate "TI J721E PCIe controller (host mode)"
depends on ARCH_K3 || COMPILE_TEST
depends on OF
- select PCIE_CADENCE_HOST
select PCI_J721E
help
Say Y here if you want to support the TI J721E PCIe platform
@@ -57,11 +58,10 @@ config PCI_J721E_HOST
core.
config PCI_J721E_EP
- bool "TI J721E PCIe controller (endpoint mode)"
+ tristate "TI J721E PCIe controller (endpoint mode)"
depends on ARCH_K3 || COMPILE_TEST
depends on OF
depends on PCI_ENDPOINT
- select PCIE_CADENCE_EP
select PCI_J721E
help
Say Y here if you want to support the TI J721E PCIe platform
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index ef1cfdae33bb..6c93f39d0288 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -15,6 +15,7 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
@@ -27,6 +28,7 @@
#define cdns_pcie_to_rc(p) container_of(p, struct cdns_pcie_rc, pcie)
#define ENABLE_REG_SYS_2 0x108
+#define ENABLE_CLR_REG_SYS_2 0x308
#define STATUS_REG_SYS_2 0x508
#define STATUS_CLR_REG_SYS_2 0x708
#define LINK_DOWN BIT(1)
@@ -116,6 +118,15 @@ static irqreturn_t j721e_pcie_link_irq_handler(int irq, void *priv)
return IRQ_HANDLED;
}
+static void j721e_pcie_disable_link_irq(struct j721e_pcie *pcie)
+{
+ u32 reg;
+
+ reg = j721e_pcie_intd_readl(pcie, ENABLE_CLR_REG_SYS_2);
+ reg |= pcie->linkdown_irq_regfield;
+ j721e_pcie_intd_writel(pcie, ENABLE_CLR_REG_SYS_2, reg);
+}
+
static void j721e_pcie_config_link_irq(struct j721e_pcie *pcie)
{
u32 reg;
@@ -153,11 +164,7 @@ static bool j721e_pcie_link_up(struct cdns_pcie *cdns_pcie)
u32 reg;
reg = j721e_pcie_user_readl(pcie, J721E_PCIE_USER_LINKSTATUS);
- reg &= LINK_STATUS;
- if (reg == LINK_UP_DL_COMPLETED)
- return true;
-
- return false;
+ return (reg & LINK_STATUS) == LINK_UP_DL_COMPLETED;
}
static const struct cdns_pcie_ops j721e_pcie_ops = {
@@ -464,7 +471,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
switch (mode) {
case PCI_MODE_RC:
- if (!IS_ENABLED(CONFIG_PCIE_CADENCE_HOST))
+ if (!IS_ENABLED(CONFIG_PCI_J721E_HOST))
return -ENODEV;
bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
@@ -483,7 +490,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
pcie->cdns_pcie = cdns_pcie;
break;
case PCI_MODE_EP:
- if (!IS_ENABLED(CONFIG_PCIE_CADENCE_EP))
+ if (!IS_ENABLED(CONFIG_PCI_J721E_EP))
return -ENODEV;
ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
@@ -633,9 +640,22 @@ static void j721e_pcie_remove(struct platform_device *pdev)
struct j721e_pcie *pcie = platform_get_drvdata(pdev);
struct cdns_pcie *cdns_pcie = pcie->cdns_pcie;
struct device *dev = &pdev->dev;
+ struct cdns_pcie_ep *ep;
+ struct cdns_pcie_rc *rc;
+
+ if (pcie->mode == PCI_MODE_RC) {
+ rc = container_of(cdns_pcie, struct cdns_pcie_rc, pcie);
+ cdns_pcie_host_disable(rc);
+ } else {
+ ep = container_of(cdns_pcie, struct cdns_pcie_ep, pcie);
+ cdns_pcie_ep_disable(ep);
+ }
+
+ gpiod_set_value_cansleep(pcie->reset_gpio, 0);
clk_disable_unprepare(pcie->refclk);
cdns_pcie_disable_phy(cdns_pcie);
+ j721e_pcie_disable_link_irq(pcie);
pm_runtime_put(dev);
pm_runtime_disable(dev);
}
@@ -730,4 +750,8 @@ static struct platform_driver j721e_pcie_driver = {
.pm = pm_sleep_ptr(&j721e_pcie_pm_ops),
},
};
-builtin_platform_driver(j721e_pcie_driver);
+module_platform_driver(j721e_pcie_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PCIe controller driver for TI's J721E and related SoCs");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 599ec4b1223e..8ab6cf70c18e 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -6,12 +6,14 @@
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/pci-epc.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include "pcie-cadence.h"
+#include "../../pci.h"
#define CDNS_PCIE_EP_MIN_APERTURE 128 /* 128 bytes */
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE 0x1
@@ -220,10 +222,11 @@ static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
clear_bit(r, &ep->ob_region_map);
}
-static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 mmc)
+static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 nr_irqs)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
+ u8 mmc = order_base_2(nr_irqs);
u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
u16 flags;
@@ -262,7 +265,7 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
*/
mme = FIELD_GET(PCI_MSI_FLAGS_QSIZE, flags);
- return mme;
+ return 1 << mme;
}
static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
@@ -281,12 +284,11 @@ static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
val &= PCI_MSIX_FLAGS_QSIZE;
- return val;
+ return val + 1;
}
static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
- u16 interrupts, enum pci_barno bir,
- u32 offset)
+ u16 nr_irqs, enum pci_barno bir, u32 offset)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
@@ -298,7 +300,7 @@ static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
reg = cap + PCI_MSIX_FLAGS;
val = cdns_pcie_ep_fn_readw(pcie, fn, reg);
val &= ~PCI_MSIX_FLAGS_QSIZE;
- val |= interrupts;
+ val |= nr_irqs - 1; /* encoded as N-1 */
cdns_pcie_ep_fn_writew(pcie, fn, reg, val);
/* Set MSI-X BAR and offset */
@@ -308,7 +310,7 @@ static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
/* Set PBA BAR and offset. BAR must match MSI-X BAR */
reg = cap + PCI_MSIX_PBA;
- val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
+ val = (offset + (nr_irqs * PCI_MSIX_ENTRY_SIZE)) | bir;
cdns_pcie_ep_fn_writel(pcie, fn, reg, val);
return 0;
@@ -337,10 +339,10 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
if (is_asserted) {
ep->irq_pending |= BIT(intx);
- msg_code = MSG_CODE_ASSERT_INTA + intx;
+ msg_code = PCIE_MSG_CODE_ASSERT_INTA + intx;
} else {
ep->irq_pending &= ~BIT(intx);
- msg_code = MSG_CODE_DEASSERT_INTA + intx;
+ msg_code = PCIE_MSG_CODE_DEASSERT_INTA + intx;
}
spin_lock_irqsave(&ep->lock, flags);
@@ -644,6 +646,17 @@ static const struct pci_epc_ops cdns_pcie_epc_ops = {
.get_features = cdns_pcie_ep_get_features,
};
+void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep)
+{
+ struct device *dev = ep->pcie.dev;
+ struct pci_epc *epc = to_pci_epc(dev);
+
+ pci_epc_deinit_notify(epc);
+ pci_epc_mem_free_addr(epc, ep->irq_phys_addr, ep->irq_cpu_addr,
+ SZ_128K);
+ pci_epc_mem_exit(epc);
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_ep_disable);
int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
@@ -751,3 +764,8 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
return ret;
}
+EXPORT_SYMBOL_GPL(cdns_pcie_ep_setup);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cadence PCIe endpoint controller driver");
+MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@free-electrons.com>");
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
index 8af95e9da7ce..59a4631de79f 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
@@ -5,6 +5,7 @@
#include <linux/delay.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/list_sort.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
@@ -72,6 +73,7 @@ void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
return rc->cfg_base + (where & 0xfff);
}
+EXPORT_SYMBOL_GPL(cdns_pci_map_bus);
static struct pci_ops cdns_pcie_host_ops = {
.map_bus = cdns_pci_map_bus,
@@ -150,6 +152,14 @@ static int cdns_pcie_retrain(struct cdns_pcie *pcie)
return ret;
}
+static void cdns_pcie_host_disable_ptm_response(struct cdns_pcie *pcie)
+{
+ u32 val;
+
+ val = cdns_pcie_readl(pcie, CDNS_PCIE_LM_PTM_CTRL);
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_PTM_CTRL, val & ~CDNS_PCIE_LM_TPM_CTRL_PTMRSEN);
+}
+
static void cdns_pcie_host_enable_ptm_response(struct cdns_pcie *pcie)
{
u32 val;
@@ -175,6 +185,26 @@ static int cdns_pcie_host_start_link(struct cdns_pcie_rc *rc)
return ret;
}
+static void cdns_pcie_host_deinit_root_port(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ u32 value, ctrl;
+
+ cdns_pcie_rp_writew(pcie, PCI_CLASS_DEVICE, 0xffff);
+ cdns_pcie_rp_writeb(pcie, PCI_CLASS_PROG, 0xff);
+ cdns_pcie_rp_writeb(pcie, PCI_CLASS_REVISION, 0xff);
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, 0xffffffff);
+ cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, 0xffff);
+ ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
+ value = ~(CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) |
+ CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(ctrl) |
+ CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE |
+ CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS |
+ CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE |
+ CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS);
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);
+}
+
static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
{
struct cdns_pcie *pcie = &rc->pcie;
@@ -391,6 +421,32 @@ static int cdns_pcie_host_dma_ranges_cmp(void *priv, const struct list_head *a,
return resource_size(entry2->res) - resource_size(entry1->res);
}
+static void cdns_pcie_host_unmap_dma_ranges(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ enum cdns_pcie_rp_bar bar;
+ u32 value;
+
+ /* Reset inbound configuration for all BARs which were being used */
+ for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) {
+ if (rc->avail_ib_bar[bar])
+ continue;
+
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar), 0);
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar), 0);
+
+ if (bar == RP_NO_BAR)
+ continue;
+
+ value = ~(LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) |
+ LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) |
+ LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) |
+ LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) |
+ LM_RC_BAR_CFG_APERTURE(bar, bar_aperture_mask[bar] + 2));
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);
+ }
+}
+
static int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc)
{
struct cdns_pcie *pcie = &rc->pcie;
@@ -428,6 +484,29 @@ static int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc)
return 0;
}
+static void cdns_pcie_host_deinit_address_translation(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rc);
+ struct resource_entry *entry;
+ int r;
+
+ cdns_pcie_host_unmap_dma_ranges(rc);
+
+ /*
+ * Reset outbound region 0 which was reserved for configuration space
+ * accesses.
+ */
+ cdns_pcie_reset_outbound_region(pcie, 0);
+
+ /* Reset rest of the outbound regions */
+ r = 1;
+ resource_list_for_each_entry(entry, &bridge->windows) {
+ cdns_pcie_reset_outbound_region(pcie, r);
+ r++;
+ }
+}
+
static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
{
struct cdns_pcie *pcie = &rc->pcie;
@@ -485,6 +564,12 @@ static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
return cdns_pcie_host_map_dma_ranges(rc);
}
+static void cdns_pcie_host_deinit(struct cdns_pcie_rc *rc)
+{
+ cdns_pcie_host_deinit_address_translation(rc);
+ cdns_pcie_host_deinit_root_port(rc);
+}
+
int cdns_pcie_host_init(struct cdns_pcie_rc *rc)
{
int err;
@@ -495,6 +580,15 @@ int cdns_pcie_host_init(struct cdns_pcie_rc *rc)
return cdns_pcie_host_init_address_translation(rc);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_init);
+
+static void cdns_pcie_host_link_disable(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+
+ cdns_pcie_stop_link(pcie);
+ cdns_pcie_host_disable_ptm_response(pcie);
+}
int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc)
{
@@ -519,6 +613,20 @@ int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc)
return 0;
}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_link_setup);
+
+void cdns_pcie_host_disable(struct cdns_pcie_rc *rc)
+{
+ struct pci_host_bridge *bridge;
+
+ bridge = pci_host_bridge_from_priv(rc);
+ pci_stop_root_bus(bridge->bus);
+ pci_remove_root_bus(bridge->bus);
+
+ cdns_pcie_host_deinit(rc);
+ cdns_pcie_host_link_disable(rc);
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_disable);
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
{
@@ -570,14 +678,10 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
if (!bridge->ops)
bridge->ops = &cdns_pcie_host_ops;
- ret = pci_host_probe(bridge);
- if (ret < 0)
- goto err_init;
-
- return 0;
-
- err_init:
- pm_runtime_put_sync(dev);
-
- return ret;
+ return pci_host_probe(bridge);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_setup);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cadence PCIe host controller driver");
+MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@free-electrons.com>");
diff --git a/drivers/pci/controller/cadence/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c
index 204e045aed8c..70a19573440e 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.c
+++ b/drivers/pci/controller/cadence/pcie-cadence.c
@@ -4,6 +4,7 @@
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
#include "pcie-cadence.h"
@@ -23,6 +24,7 @@ void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie)
cdns_pcie_writel(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP, ltssm_control_cap);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_detect_quiet_min_delay_set);
void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
u32 r, bool is_io,
@@ -100,6 +102,7 @@ void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_set_outbound_region);
void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
u8 busnr, u8 fn,
@@ -134,6 +137,7 @@ void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_set_outbound_region_for_normal_msg);
void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r)
{
@@ -146,6 +150,7 @@ void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r)
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), 0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), 0);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_reset_outbound_region);
void cdns_pcie_disable_phy(struct cdns_pcie *pcie)
{
@@ -156,6 +161,7 @@ void cdns_pcie_disable_phy(struct cdns_pcie *pcie)
phy_exit(pcie->phy[i]);
}
}
+EXPORT_SYMBOL_GPL(cdns_pcie_disable_phy);
int cdns_pcie_enable_phy(struct cdns_pcie *pcie)
{
@@ -184,6 +190,7 @@ err_phy:
return ret;
}
+EXPORT_SYMBOL_GPL(cdns_pcie_enable_phy);
int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
{
@@ -243,6 +250,7 @@ err_phy:
return ret;
}
+EXPORT_SYMBOL_GPL(cdns_pcie_init_phy);
static int cdns_pcie_suspend_noirq(struct device *dev)
{
@@ -271,3 +279,7 @@ const struct dev_pm_ops cdns_pcie_pm_ops = {
NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq,
cdns_pcie_resume_noirq)
};
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cadence PCIe controller driver");
+MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@free-electrons.com>");
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index 39ee9945c903..a149845d341a 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -250,17 +250,6 @@ struct cdns_pcie_rp_ib_bar {
struct cdns_pcie;
-enum cdns_pcie_msg_code {
- MSG_CODE_ASSERT_INTA = 0x20,
- MSG_CODE_ASSERT_INTB = 0x21,
- MSG_CODE_ASSERT_INTC = 0x22,
- MSG_CODE_ASSERT_INTD = 0x23,
- MSG_CODE_DEASSERT_INTA = 0x24,
- MSG_CODE_DEASSERT_INTB = 0x25,
- MSG_CODE_DEASSERT_INTC = 0x26,
- MSG_CODE_DEASSERT_INTD = 0x27,
-};
-
enum cdns_pcie_msg_routing {
/* Route to Root Complex */
MSG_ROUTING_TO_RC,
@@ -519,10 +508,11 @@ static inline bool cdns_pcie_link_up(struct cdns_pcie *pcie)
return true;
}
-#ifdef CONFIG_PCIE_CADENCE_HOST
+#if IS_ENABLED(CONFIG_PCIE_CADENCE_HOST)
int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc);
int cdns_pcie_host_init(struct cdns_pcie_rc *rc);
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc);
+void cdns_pcie_host_disable(struct cdns_pcie_rc *rc);
void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
int where);
#else
@@ -541,6 +531,10 @@ static inline int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
return 0;
}
+static inline void cdns_pcie_host_disable(struct cdns_pcie_rc *rc)
+{
+}
+
static inline void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
int where)
{
@@ -548,13 +542,18 @@ static inline void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int d
}
#endif
-#ifdef CONFIG_PCIE_CADENCE_EP
+#if IS_ENABLED(CONFIG_PCIE_CADENCE_EP)
int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep);
+void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep);
#else
static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
return 0;
}
+
+static inline void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep)
+{
+}
#endif
void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie);
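One subtlety behind the #ifdef -> IS_ENABLED() switch above: when a tristate option is =m, the preprocessor defines CONFIG_FOO_MODULE rather than CONFIG_FOO, so a plain #ifdef would pick the empty inline stubs in a modular build and the real symbols would never be declared. IS_ENABLED() evaluates to 1 for both =y and =m. A minimal illustration with a hypothetical symbol:

#include <linux/kconfig.h>

/* #ifdef CONFIG_FOO is true only for CONFIG_FOO=y; with CONFIG_FOO=m
 * the compiler sees CONFIG_FOO_MODULE instead, so the stub wins. */
#if IS_ENABLED(CONFIG_FOO)	/* true for CONFIG_FOO=y and =m */
int foo_setup(void);		/* provided by built-in or modular code */
#else
static inline int foo_setup(void)
{
	return 0;		/* compiled-out stub */
}
#endif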
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index 3219704aba0e..f97f5266d196 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -118,12 +118,12 @@ static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr)
return cpu_addr & DRA7XX_CPU_TO_BUS_ADDR;
}
-static int dra7xx_pcie_link_up(struct dw_pcie *pci)
+static bool dra7xx_pcie_link_up(struct dw_pcie *pci)
{
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);
- return !!(reg & LINK_UP);
+ return reg & LINK_UP;
}
static void dra7xx_pcie_stop_link(struct dw_pcie *pci)
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
index ace736b025b1..1f0e98d07109 100644
--- a/drivers/pci/controller/dwc/pci-exynos.c
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -209,12 +209,12 @@ static struct pci_ops exynos_pci_ops = {
.write = exynos_pcie_wr_own_conf,
};
-static int exynos_pcie_link_up(struct dw_pcie *pci)
+static bool exynos_pcie_link_up(struct dw_pcie *pci)
{
struct exynos_pcie *ep = to_exynos_pcie(pci);
u32 val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_RDLH_LINKUP);
- return (val & PCIE_ELBI_XMLH_LINKUP);
+ return val & PCIE_ELBI_XMLH_LINKUP;
}
static int exynos_pcie_host_init(struct dw_pcie_rp *pp)
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 5f267dd261b5..5a38cfaf989b 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -45,9 +45,14 @@
#define IMX95_PCIE_PHY_GEN_CTRL 0x0
#define IMX95_PCIE_REF_USE_PAD BIT(17)
+#define IMX95_PCIE_PHY_MPLLA_CTRL 0x10
+#define IMX95_PCIE_PHY_MPLL_STATE BIT(30)
+
#define IMX95_PCIE_SS_RW_REG_0 0xf0
#define IMX95_PCIE_REF_CLKEN BIT(23)
#define IMX95_PCIE_PHY_CR_PARA_SEL BIT(9)
+#define IMX95_PCIE_SS_RW_REG_1 0xf4
+#define IMX95_PCIE_SYS_AUX_PWR_DET BIT(31)
#define IMX95_PE0_GEN_CTRL_1 0x1050
#define IMX95_PCIE_DEVICE_TYPE GENMASK(3, 0)
@@ -71,6 +76,9 @@
#define IMX95_SID_MASK GENMASK(5, 0)
#define IMX95_MAX_LUT 32
+#define IMX95_PCIE_RST_CTRL 0x3010
+#define IMX95_PCIE_COLD_RST BIT(0)
+
#define to_imx_pcie(x) dev_get_drvdata((x)->dev)
enum imx_pcie_variants {
@@ -91,7 +99,7 @@ enum imx_pcie_variants {
};
#define IMX_PCIE_FLAG_IMX_PHY BIT(0)
-#define IMX_PCIE_FLAG_IMX_SPEED_CHANGE BIT(1)
+#define IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND BIT(1)
#define IMX_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2)
#define IMX_PCIE_FLAG_HAS_PHYDRV BIT(3)
#define IMX_PCIE_FLAG_HAS_APP_RESET BIT(4)
@@ -105,6 +113,7 @@ enum imx_pcie_variants {
*/
#define IMX_PCIE_FLAG_BROKEN_SUSPEND BIT(9)
#define IMX_PCIE_FLAG_HAS_LUT BIT(10)
+#define IMX_PCIE_FLAG_8GT_ECN_ERR051586 BIT(11)
#define imx_check_flag(pci, val) (pci->drvdata->flags & val)
@@ -126,9 +135,15 @@ struct imx_pcie_drvdata {
int (*init_phy)(struct imx_pcie *pcie);
int (*enable_ref_clk)(struct imx_pcie *pcie, bool enable);
int (*core_reset)(struct imx_pcie *pcie, bool assert);
+ int (*wait_pll_lock)(struct imx_pcie *pcie);
const struct dw_pcie_host_ops *ops;
};
+struct imx_lut_data {
+ u32 data1;
+ u32 data2;
+};
+
struct imx_pcie {
struct dw_pcie *pci;
struct gpio_desc *reset_gpiod;
@@ -148,6 +163,8 @@ struct imx_pcie {
struct regulator *vph;
void __iomem *phy_base;
+ /* LUT data for pcie */
+ struct imx_lut_data luts[IMX95_MAX_LUT];
/* power domain for pcie */
struct device *pd_pcie;
/* power domain for pcie phy */
@@ -224,6 +241,19 @@ static unsigned int imx_pcie_grp_offset(const struct imx_pcie *imx_pcie)
static int imx95_pcie_init_phy(struct imx_pcie *imx_pcie)
{
+ /*
+ * ERR051624: The Controller Without Vaux Cannot Exit L23 Ready
+ * Through Beacon or PERST# De-assertion
+ *
+ * When the auxiliary power is not available, the controller
+ * cannot exit from L23 Ready with beacon or PERST# de-assertion
+ * when main power is not removed.
+ *
+ * Workaround: Set SS_RW_REG_1[SYS_AUX_PWR_DET] to 1.
+ */
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_SS_RW_REG_1,
+ IMX95_PCIE_SYS_AUX_PWR_DET);
+
regmap_update_bits(imx_pcie->iomuxc_gpr,
IMX95_PCIE_SS_RW_REG_0,
IMX95_PCIE_PHY_CR_PARA_SEL,
@@ -460,6 +490,23 @@ static void imx7d_pcie_wait_for_phy_pll_lock(struct imx_pcie *imx_pcie)
dev_err(dev, "PCIe PLL lock timeout\n");
}
+static int imx95_pcie_wait_for_phy_pll_lock(struct imx_pcie *imx_pcie)
+{
+ u32 val;
+ struct device *dev = imx_pcie->pci->dev;
+
+ if (regmap_read_poll_timeout(imx_pcie->iomuxc_gpr,
+ IMX95_PCIE_PHY_MPLLA_CTRL, val,
+ val & IMX95_PCIE_PHY_MPLL_STATE,
+ PHY_PLL_LOCK_WAIT_USLEEP_MAX,
+ PHY_PLL_LOCK_WAIT_TIMEOUT)) {
+ dev_err(dev, "PCIe PLL lock timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
static int imx_setup_phy_mpll(struct imx_pcie *imx_pcie)
{
unsigned long phy_rate = 0;
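imx95_pcie_wait_for_phy_pll_lock() above leans on regmap_read_poll_timeout(), whose argument order (sleep interval before total timeout) is easy to get backwards. A standalone sketch with hypothetical register names:

#include <linux/bits.h>
#include <linux/regmap.h>

#define DEMO_PLL_CTRL	0x10		/* hypothetical */
#define DEMO_PLL_LOCKED	BIT(30)		/* hypothetical */

static int demo_wait_pll_locked(struct regmap *map)
{
	u32 val;

	/*
	 * Arguments: map, reg, val, condition, sleep_us, timeout_us.
	 * Returns 0 once the condition is true, -ETIMEDOUT if it never
	 * becomes true within timeout_us, or a regmap_read() error.
	 */
	return regmap_read_poll_timeout(map, DEMO_PLL_CTRL, val,
					val & DEMO_PLL_LOCKED,
					10, 100000);
}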
@@ -773,6 +820,43 @@ static int imx7d_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
return 0;
}
+static int imx95_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
+{
+ u32 val;
+
+ if (assert) {
+ /*
+ * From i.MX95 PCIe PHY perspective, the COLD reset toggle
+	 * should be completed after power-up using the following sequence.
+ * > 10us(at power-up)
+ * > 10ns(warm reset)
+ * |<------------>|
+ * ______________
+ * phy_reset ____/ \________________
+ * ____________
+ * ref_clk_en_______________________/
+ * Toggle COLD reset aligned with this sequence for i.MX95 PCIe.
+ */
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL,
+ IMX95_PCIE_COLD_RST);
+ /*
+ * Make sure the write to IMX95_PCIE_RST_CTRL is flushed to the
+ * hardware by doing a read. Otherwise, there is no guarantee
+ * that the write has reached the hardware before udelay().
+ */
+ regmap_read_bypassed(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL,
+ &val);
+ udelay(15);
+ regmap_clear_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL,
+ IMX95_PCIE_COLD_RST);
+ regmap_read_bypassed(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL,
+ &val);
+ udelay(10);
+ }
+
+ return 0;
+}
+
static void imx_pcie_assert_core_reset(struct imx_pcie *imx_pcie)
{
reset_control_assert(imx_pcie->pciephy_reset);
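The read-back between each register write and udelay() in imx95_pcie_core_reset() is the general technique for timing a posted write; the same idea with plain MMIO accessors, as a hedged sketch with a hypothetical helper:

#include <linux/delay.h>
#include <linux/io.h>

/* Pulse a reset bit with a timed width; reg/bit are caller-supplied. */
static void demo_pulse_reset(void __iomem *base, u32 reg, u32 bit)
{
	writel(readl(base + reg) | bit, base + reg);
	readl(base + reg);	/* flush the posted write before timing */
	udelay(15);
	writel(readl(base + reg) & ~bit, base + reg);
	readl(base + reg);
	udelay(10);
}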
@@ -860,6 +944,12 @@ static int imx_pcie_start_link(struct dw_pcie *pci)
u32 tmp;
int ret;
+ if (!(imx_pcie->drvdata->flags &
+ IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND)) {
+ imx_pcie_ltssm_enable(dev);
+ return 0;
+ }
+
/*
* Force Gen1 operation when starting the link. In case the link is
* started in Gen2 mode, there is a possibility the devices on the
@@ -875,11 +965,11 @@ static int imx_pcie_start_link(struct dw_pcie *pci)
/* Start LTSSM. */
imx_pcie_ltssm_enable(dev);
- ret = dw_pcie_wait_for_link(pci);
- if (ret)
- goto err_reset_phy;
-
if (pci->max_link_speed > 1) {
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret)
+ goto err_reset_phy;
+
/* Allow faster modes after the link is up */
dw_pcie_dbi_ro_wr_en(pci);
tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
@@ -896,34 +986,15 @@ static int imx_pcie_start_link(struct dw_pcie *pci)
dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
dw_pcie_dbi_ro_wr_dis(pci);
- if (imx_pcie->drvdata->flags &
- IMX_PCIE_FLAG_IMX_SPEED_CHANGE) {
-
- /*
- * On i.MX7, DIRECT_SPEED_CHANGE behaves differently
- * from i.MX6 family when no link speed transition
- * occurs and we go Gen1 -> yep, Gen1. The difference
- * is that, in such case, it will not be cleared by HW
- * which will cause the following code to report false
- * failure.
- */
- ret = imx_pcie_wait_for_speed_change(imx_pcie);
- if (ret) {
- dev_err(dev, "Failed to bring link up!\n");
- goto err_reset_phy;
- }
- }
-
- /* Make sure link training is finished as well! */
- ret = dw_pcie_wait_for_link(pci);
- if (ret)
+ ret = imx_pcie_wait_for_speed_change(imx_pcie);
+ if (ret) {
+ dev_err(dev, "Failed to bring link up!\n");
goto err_reset_phy;
+ }
} else {
dev_info(dev, "Link: Only Gen1 is enabled\n");
}
- tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
- dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS);
return 0;
err_reset_phy:
@@ -1182,6 +1253,12 @@ static int imx_pcie_host_init(struct dw_pcie_rp *pp)
goto err_phy_off;
}
+ if (imx_pcie->drvdata->wait_pll_lock) {
+ ret = imx_pcie->drvdata->wait_pll_lock(imx_pcie);
+ if (ret < 0)
+ goto err_phy_off;
+ }
+
imx_setup_phy_mpll(imx_pcie);
return 0;
@@ -1214,6 +1291,32 @@ static void imx_pcie_host_exit(struct dw_pcie_rp *pp)
regulator_disable(imx_pcie->vpcie);
}
+static void imx_pcie_host_post_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
+ u32 val;
+
+ if (imx_pcie->drvdata->flags & IMX_PCIE_FLAG_8GT_ECN_ERR051586) {
+ /*
+ * ERR051586: Compliance with 8GT/s Receiver Impedance ECN
+ *
+ * The default value of GEN3_RELATED_OFF[GEN3_ZRXDC_NONCOMPL]
+ * is 1 which makes receiver non-compliant with the ZRX-DC
+ * parameter for 2.5 GT/s when operating at 8 GT/s or higher.
+ * It causes unnecessary timeout in L1.
+ *
+ * Workaround: Program GEN3_RELATED_OFF[GEN3_ZRXDC_NONCOMPL]
+ * to 0.
+ */
+ dw_pcie_dbi_ro_wr_en(pci);
+ val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+ dw_pcie_dbi_ro_wr_dis(pci);
+ }
+}
+
/*
* In old DWC implementations, PCIE_ATU_INHIBIT_PAYLOAD in iATU Ctrl2
* register is reserved, so the generic DWC implementation of sending the
@@ -1239,6 +1342,7 @@ static const struct dw_pcie_host_ops imx_pcie_host_ops = {
static const struct dw_pcie_host_ops imx_pcie_host_dw_pme_ops = {
.init = imx_pcie_host_init,
.deinit = imx_pcie_host_exit,
+ .post_init = imx_pcie_host_post_init,
};
static const struct dw_pcie_ops dw_pcie_ops = {
@@ -1350,6 +1454,7 @@ static int imx_add_pcie_ep(struct imx_pcie *imx_pcie,
dev_err(dev, "failed to initialize endpoint\n");
return ret;
}
+ imx_pcie_host_post_init(pp);
ret = dw_pcie_ep_init_registers(ep);
if (ret) {
@@ -1386,6 +1491,42 @@ static void imx_pcie_msi_save_restore(struct imx_pcie *imx_pcie, bool save)
}
}
+static void imx_pcie_lut_save(struct imx_pcie *imx_pcie)
+{
+ u32 data1, data2;
+ int i;
+
+ for (i = 0; i < IMX95_MAX_LUT; i++) {
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_ACSCTRL,
+ IMX95_PEO_LUT_RWA | i);
+ regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, &data1);
+ regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2);
+ if (data1 & IMX95_PE0_LUT_VLD) {
+ imx_pcie->luts[i].data1 = data1;
+ imx_pcie->luts[i].data2 = data2;
+ } else {
+ imx_pcie->luts[i].data1 = 0;
+ imx_pcie->luts[i].data2 = 0;
+ }
+ }
+}
+
+static void imx_pcie_lut_restore(struct imx_pcie *imx_pcie)
+{
+ int i;
+
+ for (i = 0; i < IMX95_MAX_LUT; i++) {
+ if ((imx_pcie->luts[i].data1 & IMX95_PE0_LUT_VLD) == 0)
+ continue;
+
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1,
+ imx_pcie->luts[i].data1);
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2,
+ imx_pcie->luts[i].data2);
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_ACSCTRL, i);
+ }
+}
+
static int imx_pcie_suspend_noirq(struct device *dev)
{
struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
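The save/restore helpers above imply an indirect access protocol for the i.MX95 LUT: ACSCTRL latches the entry index, with IMX95_PEO_LUT_RWA selecting a read, and a plain index write committing DATA1/DATA2. Condensed into a hypothetical helper pair (register names as defined in this driver):

static void demo_lut_read(struct regmap *gpr, int idx, u32 *d1, u32 *d2)
{
	regmap_write(gpr, IMX95_PE0_LUT_ACSCTRL, IMX95_PEO_LUT_RWA | idx);
	regmap_read(gpr, IMX95_PE0_LUT_DATA1, d1);
	regmap_read(gpr, IMX95_PE0_LUT_DATA2, d2);
}

static void demo_lut_write(struct regmap *gpr, int idx, u32 d1, u32 d2)
{
	regmap_write(gpr, IMX95_PE0_LUT_DATA1, d1);
	regmap_write(gpr, IMX95_PE0_LUT_DATA2, d2);
	regmap_write(gpr, IMX95_PE0_LUT_ACSCTRL, idx);	/* RWA clear = write */
}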
@@ -1394,6 +1535,8 @@ static int imx_pcie_suspend_noirq(struct device *dev)
return 0;
imx_pcie_msi_save_restore(imx_pcie, true);
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_LUT))
+ imx_pcie_lut_save(imx_pcie);
if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_BROKEN_SUSPEND)) {
/*
* The minimum for a workaround would be to set PERST# and to
@@ -1438,6 +1581,8 @@ static int imx_pcie_resume_noirq(struct device *dev)
if (ret)
return ret;
}
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_LUT))
+ imx_pcie_lut_restore(imx_pcie);
imx_pcie_msi_save_restore(imx_pcie, false);
return 0;
@@ -1649,7 +1794,7 @@ static const struct imx_pcie_drvdata drvdata[] = {
[IMX6Q] = {
.variant = IMX6Q,
.flags = IMX_PCIE_FLAG_IMX_PHY |
- IMX_PCIE_FLAG_IMX_SPEED_CHANGE |
+ IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND |
IMX_PCIE_FLAG_BROKEN_SUSPEND |
IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.dbi_length = 0x200,
@@ -1665,7 +1810,7 @@ static const struct imx_pcie_drvdata drvdata[] = {
[IMX6SX] = {
.variant = IMX6SX,
.flags = IMX_PCIE_FLAG_IMX_PHY |
- IMX_PCIE_FLAG_IMX_SPEED_CHANGE |
+ IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND |
IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.gpr = "fsl,imx6q-iomuxc-gpr",
.ltssm_off = IOMUXC_GPR12,
@@ -1680,7 +1825,7 @@ static const struct imx_pcie_drvdata drvdata[] = {
[IMX6QP] = {
.variant = IMX6QP,
.flags = IMX_PCIE_FLAG_IMX_PHY |
- IMX_PCIE_FLAG_IMX_SPEED_CHANGE |
+ IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND |
IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.dbi_length = 0x200,
.gpr = "fsl,imx6q-iomuxc-gpr",
@@ -1747,12 +1892,15 @@ static const struct imx_pcie_drvdata drvdata[] = {
.variant = IMX95,
.flags = IMX_PCIE_FLAG_HAS_SERDES |
IMX_PCIE_FLAG_HAS_LUT |
+ IMX_PCIE_FLAG_8GT_ECN_ERR051586 |
IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.ltssm_off = IMX95_PE0_GEN_CTRL_3,
.ltssm_mask = IMX95_PCIE_LTSSM_EN,
.mode_off[0] = IMX95_PE0_GEN_CTRL_1,
.mode_mask[0] = IMX95_PCIE_DEVICE_TYPE,
+ .core_reset = imx95_pcie_core_reset,
.init_phy = imx95_pcie_init_phy,
+ .wait_pll_lock = imx95_pcie_wait_for_phy_pll_lock,
},
[IMX8MQ_EP] = {
.variant = IMX8MQ_EP,
@@ -1799,12 +1947,15 @@ static const struct imx_pcie_drvdata drvdata[] = {
[IMX95_EP] = {
.variant = IMX95_EP,
.flags = IMX_PCIE_FLAG_HAS_SERDES |
+ IMX_PCIE_FLAG_8GT_ECN_ERR051586 |
IMX_PCIE_FLAG_SUPPORT_64BIT,
.ltssm_off = IMX95_PE0_GEN_CTRL_3,
.ltssm_mask = IMX95_PCIE_LTSSM_EN,
.mode_off[0] = IMX95_PE0_GEN_CTRL_1,
.mode_mask[0] = IMX95_PCIE_DEVICE_TYPE,
.init_phy = imx95_pcie_init_phy,
+ .core_reset = imx95_pcie_core_reset,
+ .wait_pll_lock = imx95_pcie_wait_for_phy_pll_lock,
.epc_features = &imx95_pcie_epc_features,
.mode = DW_PCIE_EP_TYPE,
},
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index 1385d9db7b32..2b2632e513b5 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -492,13 +492,12 @@ static struct pci_ops ks_pcie_ops = {
* @pci: A pointer to the dw_pcie structure which holds the DesignWare PCIe host
* controller driver information.
*/
-static int ks_pcie_link_up(struct dw_pcie *pci)
+static bool ks_pcie_link_up(struct dw_pcie *pci)
{
u32 val;
val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0);
- val &= PORT_LOGIC_LTSSM_STATE_MASK;
- return (val == PORT_LOGIC_LTSSM_STATE_L0);
+ return (val & PORT_LOGIC_LTSSM_STATE_MASK) == PORT_LOGIC_LTSSM_STATE_L0;
}
static void ks_pcie_stop_link(struct dw_pcie *pci)
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
index db9482a113e9..787469d1b396 100644
--- a/drivers/pci/controller/dwc/pci-meson.c
+++ b/drivers/pci/controller/dwc/pci-meson.c
@@ -335,7 +335,7 @@ static struct pci_ops meson_pci_ops = {
.write = pci_generic_config_write,
};
-static int meson_pcie_link_up(struct dw_pcie *pci)
+static bool meson_pcie_link_up(struct dw_pcie *pci)
{
struct meson_pcie *mp = to_meson_pcie(pci);
struct device *dev = pci->dev;
@@ -363,7 +363,7 @@ static int meson_pcie_link_up(struct dw_pcie *pci)
dev_dbg(dev, "speed_okay\n");
if (smlh_up && rdlh_up && ltssm_up && speed_okay)
- return 1;
+ return true;
cnt++;
@@ -371,7 +371,7 @@ static int meson_pcie_link_up(struct dw_pcie *pci)
} while (cnt < WAIT_LINKUP_TIMEOUT);
dev_err(dev, "error: wait linkup timeout\n");
- return 0;
+ return false;
}
static int meson_pcie_host_init(struct dw_pcie_rp *pp)
diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c
index b5c599ccaacf..c2650fd0d458 100644
--- a/drivers/pci/controller/dwc/pcie-armada8k.c
+++ b/drivers/pci/controller/dwc/pcie-armada8k.c
@@ -139,7 +139,7 @@ static int armada8k_pcie_setup_phys(struct armada8k_pcie *pcie)
return ret;
}
-static int armada8k_pcie_link_up(struct dw_pcie *pci)
+static bool armada8k_pcie_link_up(struct dw_pcie *pci)
{
u32 reg;
u32 mask = PCIE_GLB_STS_RDLH_LINK_UP | PCIE_GLB_STS_PHY_LINK_UP;
@@ -147,10 +147,10 @@ static int armada8k_pcie_link_up(struct dw_pcie *pci)
reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_STATUS_REG);
if ((reg & mask) == mask)
- return 1;
+ return true;
dev_dbg(pci->dev, "No link detected (Global-Status: 0x%08x).\n", reg);
- return 0;
+ return false;
}
static int armada8k_pcie_start_link(struct dw_pcie *pci)
diff --git a/drivers/pci/controller/dwc/pcie-designware-debugfs.c b/drivers/pci/controller/dwc/pcie-designware-debugfs.c
index 9e6f4d00f262..c67601096c48 100644
--- a/drivers/pci/controller/dwc/pcie-designware-debugfs.c
+++ b/drivers/pci/controller/dwc/pcie-designware-debugfs.c
@@ -642,16 +642,262 @@ static void dwc_pcie_ltssm_debugfs_init(struct dw_pcie *pci, struct dentry *dir)
&dwc_pcie_ltssm_status_ops);
}
+static int dw_pcie_ptm_check_capability(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ pci->ptm_vsec_offset = dw_pcie_find_ptm_capability(pci);
+
+ return pci->ptm_vsec_offset;
+}
+
+static int dw_pcie_ptm_context_update_write(void *drvdata, u8 mode)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 val;
+
+ if (mode == PCIE_PTM_CONTEXT_UPDATE_AUTO) {
+ val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
+ val |= PTM_REQ_AUTO_UPDATE_ENABLED;
+ dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val);
+ } else if (mode == PCIE_PTM_CONTEXT_UPDATE_MANUAL) {
+ val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
+ val &= ~PTM_REQ_AUTO_UPDATE_ENABLED;
+ val |= PTM_REQ_START_UPDATE;
+ dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dw_pcie_ptm_context_update_read(void *drvdata, u8 *mode)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
+ if (FIELD_GET(PTM_REQ_AUTO_UPDATE_ENABLED, val))
+ *mode = PCIE_PTM_CONTEXT_UPDATE_AUTO;
+ else
+ /*
+	 * PTM_REQ_START_UPDATE is a self-clearing register bit. So if
+ * PTM_REQ_AUTO_UPDATE_ENABLED is not set, then it implies that
+ * manual update is used.
+ */
+ *mode = PCIE_PTM_CONTEXT_UPDATE_MANUAL;
+
+ return 0;
+}
+
+static int dw_pcie_ptm_context_valid_write(void *drvdata, bool valid)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 val;
+
+ if (valid) {
+ val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
+ val |= PTM_RES_CCONTEXT_VALID;
+ dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val);
+ } else {
+ val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
+ val &= ~PTM_RES_CCONTEXT_VALID;
+ dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val);
+ }
+
+ return 0;
+}
+
+static int dw_pcie_ptm_context_valid_read(void *drvdata, bool *valid)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
+ *valid = !!FIELD_GET(PTM_RES_CCONTEXT_VALID, val);
+
+ return 0;
+}
+
+static int dw_pcie_ptm_local_clock_read(void *drvdata, u64 *clock)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 msb, lsb;
+
+ do {
+ msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_LOCAL_MSB);
+ lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_LOCAL_LSB);
+ } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_LOCAL_MSB));
+
+ *clock = ((u64) msb) << 32 | lsb;
+
+ return 0;
+}
+
+static int dw_pcie_ptm_master_clock_read(void *drvdata, u64 *clock)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 msb, lsb;
+
+ do {
+ msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_MASTER_MSB);
+ lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_MASTER_LSB);
+ } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_MASTER_MSB));
+
+ *clock = ((u64) msb) << 32 | lsb;
+
+ return 0;
+}
+
+static int dw_pcie_ptm_t1_read(void *drvdata, u64 *clock)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 msb, lsb;
+
+ do {
+ msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB);
+ lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_LSB);
+ } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB));
+
+ *clock = ((u64) msb) << 32 | lsb;
+
+ return 0;
+}
+
+static int dw_pcie_ptm_t2_read(void *drvdata, u64 *clock)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 msb, lsb;
+
+ do {
+ msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB);
+ lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_LSB);
+ } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB));
+
+ *clock = ((u64) msb) << 32 | lsb;
+
+ return 0;
+}
+
+static int dw_pcie_ptm_t3_read(void *drvdata, u64 *clock)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 msb, lsb;
+
+ do {
+ msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB);
+ lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_LSB);
+ } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB));
+
+ *clock = ((u64) msb) << 32 | lsb;
+
+ return 0;
+}
+
+static int dw_pcie_ptm_t4_read(void *drvdata, u64 *clock)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 msb, lsb;
+
+ do {
+ msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB);
+ lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_LSB);
+ } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB));
+
+ *clock = ((u64) msb) << 32 | lsb;
+
+ return 0;
+}
+
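The eight timestamp readers above share one idiom: a 64-bit counter split across two 32-bit registers is re-read until the MSB is stable, so a low-word rollover between the two reads cannot yield a torn value. A hypothetical consolidation (the driver keeps the open-coded copies):

static u64 demo_ptm_read64(struct dw_pcie *pci, u32 lsb_off, u32 msb_off)
{
	u32 msb, lsb;

	/* Retry while the high word changes under us. */
	do {
		msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + msb_off);
		lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + lsb_off);
	} while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + msb_off));

	return ((u64)msb << 32) | lsb;
}

/* Usage: *clock = demo_ptm_read64(pci, PTM_LOCAL_LSB, PTM_LOCAL_MSB); */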
+static bool dw_pcie_ptm_context_update_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+	return pci->mode == DW_PCIE_EP_TYPE;
+}
+
+static bool dw_pcie_ptm_context_valid_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+	return pci->mode == DW_PCIE_RC_TYPE;
+}
+
+static bool dw_pcie_ptm_local_clock_visible(void *drvdata)
+{
+ /* PTM local clock is always visible */
+ return true;
+}
+
+static bool dw_pcie_ptm_master_clock_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+	return pci->mode == DW_PCIE_EP_TYPE;
+}
+
+static bool dw_pcie_ptm_t1_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+	return pci->mode == DW_PCIE_EP_TYPE;
+}
+
+static bool dw_pcie_ptm_t2_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+	return pci->mode == DW_PCIE_RC_TYPE;
+}
+
+static bool dw_pcie_ptm_t3_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+	return pci->mode == DW_PCIE_RC_TYPE;
+}
+
+static bool dw_pcie_ptm_t4_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+	return pci->mode == DW_PCIE_EP_TYPE;
+}
+
+const struct pcie_ptm_ops dw_pcie_ptm_ops = {
+ .check_capability = dw_pcie_ptm_check_capability,
+ .context_update_write = dw_pcie_ptm_context_update_write,
+ .context_update_read = dw_pcie_ptm_context_update_read,
+ .context_valid_write = dw_pcie_ptm_context_valid_write,
+ .context_valid_read = dw_pcie_ptm_context_valid_read,
+ .local_clock_read = dw_pcie_ptm_local_clock_read,
+ .master_clock_read = dw_pcie_ptm_master_clock_read,
+ .t1_read = dw_pcie_ptm_t1_read,
+ .t2_read = dw_pcie_ptm_t2_read,
+ .t3_read = dw_pcie_ptm_t3_read,
+ .t4_read = dw_pcie_ptm_t4_read,
+ .context_update_visible = dw_pcie_ptm_context_update_visible,
+ .context_valid_visible = dw_pcie_ptm_context_valid_visible,
+ .local_clock_visible = dw_pcie_ptm_local_clock_visible,
+ .master_clock_visible = dw_pcie_ptm_master_clock_visible,
+ .t1_visible = dw_pcie_ptm_t1_visible,
+ .t2_visible = dw_pcie_ptm_t2_visible,
+ .t3_visible = dw_pcie_ptm_t3_visible,
+ .t4_visible = dw_pcie_ptm_t4_visible,
+};
+
void dwc_pcie_debugfs_deinit(struct dw_pcie *pci)
{
if (!pci->debugfs)
return;
+ pcie_ptm_destroy_debugfs(pci->ptm_debugfs);
dwc_pcie_rasdes_debugfs_deinit(pci);
debugfs_remove_recursive(pci->debugfs->debug_dir);
}
-void dwc_pcie_debugfs_init(struct dw_pcie *pci)
+void dwc_pcie_debugfs_init(struct dw_pcie *pci, enum dw_pcie_device_mode mode)
{
char dirname[DWC_DEBUGFS_BUF_MAX];
struct device *dev = pci->dev;
@@ -674,4 +920,8 @@ void dwc_pcie_debugfs_init(struct dw_pcie *pci)
err);
dwc_pcie_ltssm_debugfs_init(pci, dir);
+
+ pci->mode = mode;
+ pci->ptm_debugfs = pcie_ptm_create_debugfs(pci->dev, pci,
+ &dw_pcie_ptm_ops);
}
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 1a0bf9341542..0ae54a94809b 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -256,11 +256,11 @@ static unsigned int dw_pcie_ep_get_rebar_offset(struct dw_pcie *pci,
return offset;
reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
- nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >> PCI_REBAR_CTRL_NBAR_SHIFT;
+ nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, reg);
for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) {
reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
- bar_index = reg & PCI_REBAR_CTRL_BAR_IDX;
+ bar_index = FIELD_GET(PCI_REBAR_CTRL_BAR_IDX, reg);
if (bar_index == bar)
return offset;
}
@@ -532,15 +532,16 @@ static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
val = FIELD_GET(PCI_MSI_FLAGS_QSIZE, val);
- return val;
+ return 1 << val;
}
static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
- u8 interrupts)
+ u8 nr_irqs)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct dw_pcie_ep_func *ep_func;
+ u8 mmc = order_base_2(nr_irqs);
u32 val, reg;
ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
@@ -550,7 +551,7 @@ static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
reg = ep_func->msi_cap + PCI_MSI_FLAGS;
val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
val &= ~PCI_MSI_FLAGS_QMASK;
- val |= FIELD_PREP(PCI_MSI_FLAGS_QMASK, interrupts);
+ val |= FIELD_PREP(PCI_MSI_FLAGS_QMASK, mmc);
dw_pcie_dbi_ro_wr_en(pci);
dw_pcie_ep_writew_dbi(ep, func_no, reg, val);
dw_pcie_dbi_ro_wr_dis(pci);
@@ -575,11 +576,11 @@ static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
val &= PCI_MSIX_FLAGS_QSIZE;
- return val;
+ return val + 1;
}
static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
- u16 interrupts, enum pci_barno bir, u32 offset)
+ u16 nr_irqs, enum pci_barno bir, u32 offset)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -595,7 +596,7 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
reg = ep_func->msix_cap + PCI_MSIX_FLAGS;
val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
val &= ~PCI_MSIX_FLAGS_QSIZE;
- val |= interrupts;
+ val |= nr_irqs - 1; /* encoded as N-1 */
dw_pcie_writew_dbi(pci, reg, val);
reg = ep_func->msix_cap + PCI_MSIX_TABLE;
@@ -603,7 +604,7 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
dw_pcie_ep_writel_dbi(ep, func_no, reg, val);
reg = ep_func->msix_cap + PCI_MSIX_PBA;
- val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
+ val = (offset + (nr_irqs * PCI_MSIX_ENTRY_SIZE)) | bir;
dw_pcie_ep_writel_dbi(ep, func_no, reg, val);
dw_pcie_dbi_ro_wr_dis(pci);
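The two fields fixed above use different encodings: MSI's Multiple Message Capable field stores log2 of the vector count (rounded up), while MSI-X's Table Size field stores N - 1. A worked sketch of the round trips (the demo names are hypothetical):

#include <linux/log2.h>

static u8 demo_msi_encode(unsigned int nr_irqs)		/* 8 -> 3 */
{
	return order_base_2(nr_irqs);	/* ceil(log2()), max field value 5 */
}

static unsigned int demo_msi_decode(u8 qsize)		/* 3 -> 8 */
{
	return 1U << qsize;
}

static u16 demo_msix_encode(unsigned int nr_irqs)	/* 8 -> 7 */
{
	return nr_irqs - 1;	/* 11-bit field, so up to 2048 vectors */
}

static unsigned int demo_msix_decode(u16 qsize)		/* 7 -> 8 */
{
	return qsize + 1;
}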
@@ -671,7 +672,7 @@ static const struct pci_epc_ops epc_ops = {
* @ep: DWC EP device
* @func_no: Function number of the endpoint
*
- * Return: 0 if success, errono otherwise.
+ * Return: 0 if success, errno otherwise.
*/
int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no)
{
@@ -690,7 +691,7 @@ EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_intx_irq);
* @func_no: Function number of the endpoint
* @interrupt_num: Interrupt number to be raised
*
- * Return: 0 if success, errono otherwise.
+ * Return: 0 if success, errno otherwise.
*/
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num)
@@ -875,8 +876,7 @@ static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci)
if (offset) {
reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
- nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
- PCI_REBAR_CTRL_NBAR_SHIFT;
+ nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, reg);
/*
* PCIe r6.0, sec 7.8.6.2 requires us to support at least one
@@ -897,7 +897,7 @@ static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci)
* is why RESBAR_CAP_REG is written here.
*/
val = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
- bar = val & PCI_REBAR_CTRL_BAR_IDX;
+ bar = FIELD_GET(PCI_REBAR_CTRL_BAR_IDX, val);
if (ep->epf_bar[bar])
pci_epc_bar_size_to_rebar_cap(ep->epf_bar[bar]->size, &val);
else
@@ -1013,7 +1013,7 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
dw_pcie_ep_init_non_sticky_registers(pci);
- dwc_pcie_debugfs_init(pci);
+ dwc_pcie_debugfs_init(pci, DW_PCIE_EP_TYPE);
return 0;
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index d1cd48efad43..906277f9ffaf 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -523,6 +523,13 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
dw_pcie_iatu_detect(pci);
+ if (pci->num_lanes < 1)
+ pci->num_lanes = dw_pcie_link_get_max_link_width(pci);
+
+ ret = of_pci_get_equalization_presets(dev, &pp->presets, pci->num_lanes);
+ if (ret)
+ goto err_free_msi;
+
/*
* Allocate the resource for MSG TLP before programming the iATU
* outbound window in dw_pcie_setup_rc(). Since the allocation depends
@@ -567,7 +574,7 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
if (pp->ops->post_init)
pp->ops->post_init(pp);
- dwc_pcie_debugfs_init(pci);
+ dwc_pcie_debugfs_init(pci, DW_PCIE_RC_TYPE);
return 0;
@@ -828,6 +835,77 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
return 0;
}
+static void dw_pcie_program_presets(struct dw_pcie_rp *pp, enum pci_bus_speed speed)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ u8 lane_eq_offset, lane_reg_size, cap_id;
+ u8 *presets;
+ u32 cap;
+ int i;
+
+ if (speed == PCIE_SPEED_8_0GT) {
+ presets = (u8 *)pp->presets.eq_presets_8gts;
+ lane_eq_offset = PCI_SECPCI_LE_CTRL;
+ cap_id = PCI_EXT_CAP_ID_SECPCI;
+		/* For a data rate of 8 GT/s, each lane equalization control is 16 bits wide */
+ lane_reg_size = 0x2;
+ } else if (speed == PCIE_SPEED_16_0GT) {
+ presets = pp->presets.eq_presets_Ngts[EQ_PRESET_TYPE_16GTS - 1];
+ lane_eq_offset = PCI_PL_16GT_LE_CTRL;
+ cap_id = PCI_EXT_CAP_ID_PL_16GT;
+ lane_reg_size = 0x1;
+ } else if (speed == PCIE_SPEED_32_0GT) {
+ presets = pp->presets.eq_presets_Ngts[EQ_PRESET_TYPE_32GTS - 1];
+ lane_eq_offset = PCI_PL_32GT_LE_CTRL;
+ cap_id = PCI_EXT_CAP_ID_PL_32GT;
+ lane_reg_size = 0x1;
+ } else if (speed == PCIE_SPEED_64_0GT) {
+ presets = pp->presets.eq_presets_Ngts[EQ_PRESET_TYPE_64GTS - 1];
+ lane_eq_offset = PCI_PL_64GT_LE_CTRL;
+ cap_id = PCI_EXT_CAP_ID_PL_64GT;
+ lane_reg_size = 0x1;
+ } else {
+ return;
+ }
+
+ if (presets[0] == PCI_EQ_RESV)
+ return;
+
+ cap = dw_pcie_find_ext_capability(pci, cap_id);
+ if (!cap)
+ return;
+
+ /*
+ * Write preset values to the registers byte-by-byte for the given
+ * number of lanes and register size.
+ */
+ for (i = 0; i < pci->num_lanes * lane_reg_size; i++)
+ dw_pcie_writeb_dbi(pci, cap + lane_eq_offset + i, presets[i]);
+}
+
+static void dw_pcie_config_presets(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ enum pci_bus_speed speed = pcie_link_speed[pci->max_link_speed];
+
+ /*
+ * Lane equalization settings need to be applied for all data rates the
+ * controller supports and for all supported lanes.
+ */
+
+ if (speed >= PCIE_SPEED_8_0GT)
+ dw_pcie_program_presets(pp, PCIE_SPEED_8_0GT);
+
+ if (speed >= PCIE_SPEED_16_0GT)
+ dw_pcie_program_presets(pp, PCIE_SPEED_16_0GT);
+
+ if (speed >= PCIE_SPEED_32_0GT)
+ dw_pcie_program_presets(pp, PCIE_SPEED_32_0GT);
+
+ if (speed >= PCIE_SPEED_64_0GT)
+ dw_pcie_program_presets(pp, PCIE_SPEED_64_0GT);
+}
+
int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
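Behind lane_reg_size in dw_pcie_program_presets() above: the Secondary PCIe capability's 8.0 GT/s Lane Equalization Control register is 16 bits per lane, while the 16/32/64 GT/s Physical Layer capabilities use one byte per lane, so the byte loop covers num_lanes * lane_reg_size bytes. A worked sizing sketch (hypothetical helper):

/*
 * Bytes written for a x4 link:
 *   8 GT/s  : 4 lanes * 2 bytes = 8 bytes at cap + PCI_SECPCI_LE_CTRL
 *   16+ GT/s: 4 lanes * 1 byte  = 4 bytes at the per-rate LE_CTRL offset
 */
static unsigned int demo_preset_bytes(unsigned int num_lanes, bool is_8gts)
{
	return num_lanes * (is_8gts ? 2 : 1);
}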
@@ -881,6 +959,7 @@ int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
+ dw_pcie_config_presets(pp);
/*
* If the platform provides its own child bus config accesses, it means
* the platform uses its own address translation component rather than
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 97d76d3dc066..4d794964fa0f 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -54,6 +54,14 @@ static const char * const dw_pcie_core_rsts[DW_PCIE_NUM_CORE_RSTS] = {
[DW_PCIE_PWR_RST] = "pwr",
};
+static const struct dwc_pcie_vsec_id dwc_pcie_ptm_vsec_ids[] = {
+ { .vendor_id = PCI_VENDOR_ID_QCOM, /* EP */
+ .vsec_id = 0x03, .vsec_rev = 0x1 },
+ { .vendor_id = PCI_VENDOR_ID_QCOM, /* RC */
+ .vsec_id = 0x04, .vsec_rev = 0x1 },
+ { }
+};
+
static int dw_pcie_get_clocks(struct dw_pcie *pci)
{
int i, ret;
@@ -330,6 +338,12 @@ u16 dw_pcie_find_rasdes_capability(struct dw_pcie *pci)
}
EXPORT_SYMBOL_GPL(dw_pcie_find_rasdes_capability);
+u16 dw_pcie_find_ptm_capability(struct dw_pcie *pci)
+{
+ return dw_pcie_find_vsec_capability(pci, dwc_pcie_ptm_vsec_ids);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_find_ptm_capability);
+
int dw_pcie_read(void __iomem *addr, int size, u32 *val)
{
if (!IS_ALIGNED((uintptr_t)addr, size)) {
@@ -711,7 +725,7 @@ int dw_pcie_wait_for_link(struct dw_pcie *pci)
}
EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link);
-int dw_pcie_link_up(struct dw_pcie *pci)
+bool dw_pcie_link_up(struct dw_pcie *pci)
{
u32 val;
@@ -781,6 +795,14 @@ static void dw_pcie_link_set_max_speed(struct dw_pcie *pci)
}
+int dw_pcie_link_get_max_link_width(struct dw_pcie *pci)
+{
+ u8 cap = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u32 lnkcap = dw_pcie_readl_dbi(pci, cap + PCI_EXP_LNKCAP);
+
+ return FIELD_GET(PCI_EXP_LNKCAP_MLW, lnkcap);
+}
+
static void dw_pcie_link_set_max_link_width(struct dw_pcie *pci, u32 num_lanes)
{
u32 lnkcap, lwsc, plc;
@@ -797,22 +819,19 @@ static void dw_pcie_link_set_max_link_width(struct dw_pcie *pci, u32 num_lanes)
/* Set link width speed control register */
lwsc = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
lwsc &= ~PORT_LOGIC_LINK_WIDTH_MASK;
+ lwsc |= PORT_LOGIC_LINK_WIDTH_1_LANES;
switch (num_lanes) {
case 1:
plc |= PORT_LINK_MODE_1_LANES;
- lwsc |= PORT_LOGIC_LINK_WIDTH_1_LANES;
break;
case 2:
plc |= PORT_LINK_MODE_2_LANES;
- lwsc |= PORT_LOGIC_LINK_WIDTH_2_LANES;
break;
case 4:
plc |= PORT_LINK_MODE_4_LANES;
- lwsc |= PORT_LOGIC_LINK_WIDTH_4_LANES;
break;
case 8:
plc |= PORT_LINK_MODE_8_LANES;
- lwsc |= PORT_LOGIC_LINK_WIDTH_8_LANES;
break;
default:
dev_err(pci->dev, "num-lanes %u: invalid value\n", num_lanes);
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 56aafdbcdaca..ce9e18554e42 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -25,6 +25,8 @@
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
+#include "../../pci.h"
+
/* DWC PCIe IP-core versions (native support since v4.70a) */
#define DW_PCIE_VER_365A 0x3336352a
#define DW_PCIE_VER_460A 0x3436302a
@@ -260,6 +262,21 @@
#define PCIE_RAS_DES_EVENT_COUNTER_DATA 0xc
+/* PTM register definitions */
+#define PTM_RES_REQ_CTRL 0x8
+#define PTM_RES_CCONTEXT_VALID BIT(0)
+#define PTM_REQ_AUTO_UPDATE_ENABLED BIT(0)
+#define PTM_REQ_START_UPDATE BIT(1)
+
+#define PTM_LOCAL_LSB 0x10
+#define PTM_LOCAL_MSB 0x14
+#define PTM_T1_T2_LSB 0x18
+#define PTM_T1_T2_MSB 0x1c
+#define PTM_T3_T4_LSB 0x28
+#define PTM_T3_T4_MSB 0x2c
+#define PTM_MASTER_LSB 0x38
+#define PTM_MASTER_MSB 0x3c
+
/*
* The default address offset between dbi_base and atu_base. Root controller
* drivers are not required to initialize atu_base if the offset matches this
@@ -412,6 +429,7 @@ struct dw_pcie_rp {
int msg_atu_index;
struct resource *msg_res;
bool use_linkup_irq;
+ struct pci_eq_presets presets;
};
struct dw_pcie_ep_ops {
@@ -462,7 +480,7 @@ struct dw_pcie_ops {
size_t size, u32 val);
void (*write_dbi2)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
size_t size, u32 val);
- int (*link_up)(struct dw_pcie *pcie);
+ bool (*link_up)(struct dw_pcie *pcie);
enum dw_pcie_ltssm (*get_ltssm)(struct dw_pcie *pcie);
int (*start_link)(struct dw_pcie *pcie);
void (*stop_link)(struct dw_pcie *pcie);
@@ -503,6 +521,9 @@ struct dw_pcie {
struct gpio_desc *pe_rst;
bool suspended;
struct debugfs_info *debugfs;
+ enum dw_pcie_device_mode mode;
+ u16 ptm_vsec_offset;
+ struct pci_ptm_debugfs *ptm_debugfs;
/*
* If iATU input addresses are offset from CPU physical addresses,
@@ -530,6 +551,7 @@ void dw_pcie_version_detect(struct dw_pcie *pci);
u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap);
u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap);
u16 dw_pcie_find_rasdes_capability(struct dw_pcie *pci);
+u16 dw_pcie_find_ptm_capability(struct dw_pcie *pci);
int dw_pcie_read(void __iomem *addr, int size, u32 *val);
int dw_pcie_write(void __iomem *addr, int size, u32 val);
@@ -537,9 +559,10 @@ int dw_pcie_write(void __iomem *addr, int size, u32 val);
u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size);
void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
-int dw_pcie_link_up(struct dw_pcie *pci);
+bool dw_pcie_link_up(struct dw_pcie *pci);
void dw_pcie_upconfig_setup(struct dw_pcie *pci);
int dw_pcie_wait_for_link(struct dw_pcie *pci);
+int dw_pcie_link_get_max_link_width(struct dw_pcie *pci);
int dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
const struct dw_pcie_ob_atu_cfg *atu);
int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
@@ -871,10 +894,11 @@ dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
#endif
#ifdef CONFIG_PCIE_DW_DEBUGFS
-void dwc_pcie_debugfs_init(struct dw_pcie *pci);
+void dwc_pcie_debugfs_init(struct dw_pcie *pci, enum dw_pcie_device_mode mode);
void dwc_pcie_debugfs_deinit(struct dw_pcie *pci);
#else
-static inline void dwc_pcie_debugfs_init(struct dw_pcie *pci)
+static inline void dwc_pcie_debugfs_init(struct dw_pcie *pci,
+ enum dw_pcie_device_mode mode)
{
}
static inline void dwc_pcie_debugfs_deinit(struct dw_pcie *pci)
diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
index 678d510a261d..93171a392879 100644
--- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
@@ -8,6 +8,7 @@
* Author: Simon Xue <xxm@rock-chips.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
#include <linux/irqchip/chained_irq.h>
@@ -21,6 +22,7 @@
#include <linux/regmap.h>
#include <linux/reset.h>
+#include "../../pci.h"
#include "pcie-designware.h"
/*
@@ -33,26 +35,36 @@
#define to_rockchip_pcie(x) dev_get_drvdata((x)->dev)
-#define PCIE_CLIENT_RC_MODE HIWORD_UPDATE_BIT(0x40)
-#define PCIE_CLIENT_EP_MODE HIWORD_UPDATE(0xf0, 0x0)
-#define PCIE_CLIENT_ENABLE_LTSSM HIWORD_UPDATE_BIT(0xc)
-#define PCIE_CLIENT_DISABLE_LTSSM HIWORD_UPDATE(0x0c, 0x8)
-#define PCIE_CLIENT_INTR_STATUS_MISC 0x10
-#define PCIE_CLIENT_INTR_MASK_MISC 0x24
-#define PCIE_SMLH_LINKUP BIT(16)
-#define PCIE_RDLH_LINKUP BIT(17)
-#define PCIE_LINKUP (PCIE_SMLH_LINKUP | PCIE_RDLH_LINKUP)
-#define PCIE_RDLH_LINK_UP_CHGED BIT(1)
-#define PCIE_LINK_REQ_RST_NOT_INT BIT(2)
-#define PCIE_L0S_ENTRY 0x11
-#define PCIE_CLIENT_GENERAL_CONTROL 0x0
+/* General Control Register */
+#define PCIE_CLIENT_GENERAL_CON 0x0
+#define PCIE_CLIENT_RC_MODE HIWORD_UPDATE_BIT(0x40)
+#define PCIE_CLIENT_EP_MODE HIWORD_UPDATE(0xf0, 0x0)
+#define PCIE_CLIENT_ENABLE_LTSSM HIWORD_UPDATE_BIT(0xc)
+#define PCIE_CLIENT_DISABLE_LTSSM HIWORD_UPDATE(0x0c, 0x8)
+
+/* Interrupt Status Register Related to Legacy Interrupt */
#define PCIE_CLIENT_INTR_STATUS_LEGACY 0x8
+
+/* Interrupt Status Register Related to Miscellaneous Operation */
+#define PCIE_CLIENT_INTR_STATUS_MISC 0x10
+#define PCIE_RDLH_LINK_UP_CHGED BIT(1)
+#define PCIE_LINK_REQ_RST_NOT_INT BIT(2)
+
+/* Interrupt Mask Register Related to Legacy Interrupt */
#define PCIE_CLIENT_INTR_MASK_LEGACY 0x1c
-#define PCIE_CLIENT_GENERAL_DEBUG 0x104
+
+/* Interrupt Mask Register Related to Miscellaneous Operation */
+#define PCIE_CLIENT_INTR_MASK_MISC 0x24
+
+/* Hot Reset Control Register */
#define PCIE_CLIENT_HOT_RESET_CTRL 0x180
+#define PCIE_LTSSM_ENABLE_ENHANCE BIT(4)
+
+/* LTSSM Status Register */
#define PCIE_CLIENT_LTSSM_STATUS 0x300
-#define PCIE_LTSSM_ENABLE_ENHANCE BIT(4)
-#define PCIE_LTSSM_STATUS_MASK GENMASK(5, 0)
+#define PCIE_LINKUP 0x3
+#define PCIE_LINKUP_MASK GENMASK(17, 16)
+#define PCIE_LTSSM_STATUS_MASK GENMASK(5, 0)
struct rockchip_pcie {
struct dw_pcie pci;
@@ -163,25 +175,36 @@ static u32 rockchip_pcie_get_ltssm(struct rockchip_pcie *rockchip)
static void rockchip_pcie_enable_ltssm(struct rockchip_pcie *rockchip)
{
rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_ENABLE_LTSSM,
- PCIE_CLIENT_GENERAL_CONTROL);
+ PCIE_CLIENT_GENERAL_CON);
}
static void rockchip_pcie_disable_ltssm(struct rockchip_pcie *rockchip)
{
rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_DISABLE_LTSSM,
- PCIE_CLIENT_GENERAL_CONTROL);
+ PCIE_CLIENT_GENERAL_CON);
}
-static int rockchip_pcie_link_up(struct dw_pcie *pci)
+static bool rockchip_pcie_link_up(struct dw_pcie *pci)
{
struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
u32 val = rockchip_pcie_get_ltssm(rockchip);
- if ((val & PCIE_LINKUP) == PCIE_LINKUP &&
- (val & PCIE_LTSSM_STATUS_MASK) == PCIE_L0S_ENTRY)
- return 1;
+ return FIELD_GET(PCIE_LINKUP_MASK, val) == PCIE_LINKUP;
+}
- return 0;
+static void rockchip_pcie_enable_l0s(struct dw_pcie *pci)
+{
+ u32 cap, lnkcap;
+
+ /* Enable L0S capability for all SoCs */
+ cap = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ if (cap) {
+ lnkcap = dw_pcie_readl_dbi(pci, cap + PCI_EXP_LNKCAP);
+ lnkcap |= PCI_EXP_LNKCAP_ASPM_L0S;
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writel_dbi(pci, cap + PCI_EXP_LNKCAP, lnkcap);
+ dw_pcie_dbi_ro_wr_dis(pci);
+ }
}
static int rockchip_pcie_start_link(struct dw_pcie *pci)
@@ -202,7 +225,7 @@ static int rockchip_pcie_start_link(struct dw_pcie *pci)
* We need extra time beyond that, rather than setting just
* 100us, as we don't know how long the device needs to reset.
*/
- msleep(100);
+ msleep(PCIE_T_PVPERL_MS);
gpiod_set_value_cansleep(rockchip->rst_gpio, 1);
return 0;
@@ -233,6 +256,8 @@ static int rockchip_pcie_host_init(struct dw_pcie_rp *pp)
irq_set_chained_handler_and_data(irq, rockchip_pcie_intx_handler,
rockchip);
+ rockchip_pcie_enable_l0s(pci);
+
return 0;
}
@@ -263,16 +288,14 @@ static void rockchip_pcie_ep_hide_broken_ats_cap_rk3588(struct dw_pcie_ep *ep)
dev_err(dev, "failed to hide ATS capability\n");
}
-static void rockchip_pcie_ep_pre_init(struct dw_pcie_ep *ep)
-{
- rockchip_pcie_ep_hide_broken_ats_cap_rk3588(ep);
-}
-
static void rockchip_pcie_ep_init(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
enum pci_barno bar;
+ rockchip_pcie_enable_l0s(pci);
+ rockchip_pcie_ep_hide_broken_ats_cap_rk3588(ep);
+
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
dw_pcie_ep_reset_bar(pci, bar);
};
@@ -342,7 +365,6 @@ rockchip_pcie_get_features(struct dw_pcie_ep *ep)
static const struct dw_pcie_ep_ops rockchip_pcie_ep_ops = {
.init = rockchip_pcie_ep_init,
- .pre_init = rockchip_pcie_ep_pre_init,
.raise_irq = rockchip_pcie_raise_irq,
.get_features = rockchip_pcie_get_features,
};
@@ -410,8 +432,8 @@ static int rockchip_pcie_phy_init(struct rockchip_pcie *rockchip)
static void rockchip_pcie_phy_deinit(struct rockchip_pcie *rockchip)
{
- phy_exit(rockchip->phy);
phy_power_off(rockchip->phy);
+ phy_exit(rockchip->phy);
}
static const struct dw_pcie_ops dw_pcie_ops = {
@@ -426,7 +448,7 @@ static irqreturn_t rockchip_pcie_rc_sys_irq_thread(int irq, void *arg)
struct dw_pcie *pci = &rockchip->pci;
struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = pci->dev;
- u32 reg, val;
+ u32 reg;
reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_MISC);
rockchip_pcie_writel_apb(rockchip, reg, PCIE_CLIENT_INTR_STATUS_MISC);
@@ -435,8 +457,7 @@ static irqreturn_t rockchip_pcie_rc_sys_irq_thread(int irq, void *arg)
dev_dbg(dev, "LTSSM_STATUS: %#x\n", rockchip_pcie_get_ltssm(rockchip));
if (reg & PCIE_RDLH_LINK_UP_CHGED) {
- val = rockchip_pcie_get_ltssm(rockchip);
- if ((val & PCIE_LINKUP) == PCIE_LINKUP) {
+ if (rockchip_pcie_link_up(pci)) {
dev_dbg(dev, "Received Link up event. Starting enumeration!\n");
/* Rescan the bus to enumerate endpoint devices */
pci_lock_rescan_remove();
@@ -453,7 +474,7 @@ static irqreturn_t rockchip_pcie_ep_sys_irq_thread(int irq, void *arg)
struct rockchip_pcie *rockchip = arg;
struct dw_pcie *pci = &rockchip->pci;
struct device *dev = pci->dev;
- u32 reg, val;
+ u32 reg;
reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_MISC);
rockchip_pcie_writel_apb(rockchip, reg, PCIE_CLIENT_INTR_STATUS_MISC);
@@ -467,8 +488,7 @@ static irqreturn_t rockchip_pcie_ep_sys_irq_thread(int irq, void *arg)
}
if (reg & PCIE_RDLH_LINK_UP_CHGED) {
- val = rockchip_pcie_get_ltssm(rockchip);
- if ((val & PCIE_LINKUP) == PCIE_LINKUP) {
+ if (rockchip_pcie_link_up(pci)) {
dev_dbg(dev, "link up\n");
dw_pcie_ep_linkup(&pci->ep);
}
@@ -505,7 +525,7 @@ static int rockchip_pcie_configure_rc(struct platform_device *pdev,
rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_RC_MODE,
- PCIE_CLIENT_GENERAL_CONTROL);
+ PCIE_CLIENT_GENERAL_CON);
pp = &rockchip->pci.pp;
pp->ops = &rockchip_pcie_host_ops;
@@ -551,7 +571,7 @@ static int rockchip_pcie_configure_ep(struct platform_device *pdev,
rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_EP_MODE,
- PCIE_CLIENT_GENERAL_CONTROL);
+ PCIE_CLIENT_GENERAL_CON);
rockchip->pci.ep.ops = &rockchip_pcie_ep_ops;
rockchip->pci.ep.page_size = SZ_64K;
@@ -601,6 +621,10 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
rockchip->pci.ops = &dw_pcie_ops;
rockchip->data = data;
+ /* Default N_FTS value (210) is broken, override it to 255 */
+ rockchip->pci.n_fts[0] = 255; /* Gen1 */
+ rockchip->pci.n_fts[1] = 255; /* Gen2+ */
+
ret = rockchip_pcie_resource_get(pdev, rockchip);
if (ret)
return ret;
diff --git a/drivers/pci/controller/dwc/pcie-hisi.c b/drivers/pci/controller/dwc/pcie-hisi.c
index 8904b5b85ee5..3c17897e56fc 100644
--- a/drivers/pci/controller/dwc/pcie-hisi.c
+++ b/drivers/pci/controller/dwc/pcie-hisi.c
@@ -15,6 +15,7 @@
#include <linux/pci-acpi.h>
#include <linux/pci-ecam.h>
#include "../../pci.h"
+#include "../pci-host-common.h"
#if defined(CONFIG_PCI_HISI) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c
index 1f2f4c28a949..a52071589377 100644
--- a/drivers/pci/controller/dwc/pcie-histb.c
+++ b/drivers/pci/controller/dwc/pcie-histb.c
@@ -151,7 +151,7 @@ static struct pci_ops histb_pci_ops = {
.write = histb_pcie_wr_own_conf,
};
-static int histb_pcie_link_up(struct dw_pcie *pci)
+static bool histb_pcie_link_up(struct dw_pcie *pci)
{
struct histb_pcie *hipcie = to_histb_pcie(pci);
u32 regval;
@@ -160,11 +160,8 @@ static int histb_pcie_link_up(struct dw_pcie *pci)
regval = histb_pcie_readl(hipcie, PCIE_SYS_STAT0);
status = histb_pcie_readl(hipcie, PCIE_SYS_STAT4);
status &= PCIE_LTSSM_STATE_MASK;
- if ((regval & PCIE_XMLH_LINK_UP) && (regval & PCIE_RDLH_LINK_UP) &&
- (status == PCIE_LTSSM_STATE_ACTIVE))
- return 1;
-
- return 0;
+ return ((regval & PCIE_XMLH_LINK_UP) && (regval & PCIE_RDLH_LINK_UP) &&
+ (status == PCIE_LTSSM_STATE_ACTIVE));
}
static int histb_pcie_start_link(struct dw_pcie *pci)
diff --git a/drivers/pci/controller/dwc/pcie-keembay.c b/drivers/pci/controller/dwc/pcie-keembay.c
index 278205db60a2..67dd3337b447 100644
--- a/drivers/pci/controller/dwc/pcie-keembay.c
+++ b/drivers/pci/controller/dwc/pcie-keembay.c
@@ -101,7 +101,7 @@ static void keembay_pcie_ltssm_set(struct keembay_pcie *pcie, bool enable)
writel(val, pcie->apb_base + PCIE_REGS_PCIE_APP_CNTRL);
}
-static int keembay_pcie_link_up(struct dw_pcie *pci)
+static bool keembay_pcie_link_up(struct dw_pcie *pci)
{
struct keembay_pcie *pcie = dev_get_drvdata(pci->dev);
u32 val;
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index d0e6a3811b00..91559c8b1866 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -586,16 +586,13 @@ static void kirin_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false);
}
-static int kirin_pcie_link_up(struct dw_pcie *pci)
+static bool kirin_pcie_link_up(struct dw_pcie *pci)
{
struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
u32 val;
regmap_read(kirin_pcie->apb, PCIE_APB_PHY_STATUS0, &val);
- if ((val & PCIE_LINKUP_ENABLE) == PCIE_LINKUP_ENABLE)
- return 1;
-
- return 0;
+ return (val & PCIE_LINKUP_ENABLE) == PCIE_LINKUP_ENABLE;
}
static int kirin_pcie_start_link(struct dw_pcie *pci)
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
index 46b1c6d19974..bf7c6ac0f3e3 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -60,6 +60,7 @@
#define PARF_DEVICE_TYPE 0x1000
#define PARF_BDF_TO_SID_CFG 0x2c00
#define PARF_INT_ALL_5_MASK 0x2dcc
+#define PARF_INT_ALL_3_MASK 0x2e18
/* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */
#define PARF_INT_ALL_LINK_DOWN BIT(1)
@@ -132,6 +133,9 @@
/* PARF_INT_ALL_5_MASK fields */
#define PARF_INT_ALL_5_MHI_RAM_DATA_PARITY_ERR BIT(0)
+/* PARF_INT_ALL_3_MASK fields */
+#define PARF_INT_ALL_3_PTM_UPDATING BIT(4)
+
/* ELBI registers */
#define ELBI_SYS_STTS 0x08
#define ELBI_CS2_ENABLE 0xa4
@@ -261,7 +265,7 @@ static void qcom_pcie_ep_configure_tcsr(struct qcom_pcie_ep *pcie_ep)
}
}
-static int qcom_pcie_dw_link_up(struct dw_pcie *pci)
+static bool qcom_pcie_dw_link_up(struct dw_pcie *pci)
{
struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
u32 reg;
@@ -497,6 +501,10 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
writel_relaxed(val, pcie_ep->parf + PARF_INT_ALL_5_MASK);
}
+ val = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_3_MASK);
+ val &= ~PARF_INT_ALL_3_PTM_UPDATING;
+ writel_relaxed(val, pcie_ep->parf + PARF_INT_ALL_3_MASK);
+
ret = dw_pcie_ep_init_registers(&pcie_ep->pci.ep);
if (ret) {
dev_err(dev, "Failed to complete initialization: %d\n", ret);
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index dc98ae63362d..c789e3f85655 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -289,7 +289,7 @@ static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
/* Ensure that PERST has been asserted for at least 100 ms */
- msleep(100);
+ msleep(PCIE_T_PVPERL_MS);
gpiod_set_value_cansleep(pcie->reset, 0);
usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
@@ -1221,12 +1221,12 @@ static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
return 0;
}
-static int qcom_pcie_link_up(struct dw_pcie *pci)
+static bool qcom_pcie_link_up(struct dw_pcie *pci)
{
u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
- return !!(val & PCI_EXP_LNKSTA_DLLLA);
+ return val & PCI_EXP_LNKSTA_DLLLA;
}
static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
@@ -1840,6 +1840,7 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 },
{ .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 },
{ .compatible = "qcom,pcie-ipq4019", .data = &cfg_2_4_0 },
+ { .compatible = "qcom,pcie-ipq5018", .data = &cfg_2_9_0 },
{ .compatible = "qcom,pcie-ipq6018", .data = &cfg_2_9_0 },
{ .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 },
{ .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 },
diff --git a/drivers/pci/controller/dwc/pcie-rcar-gen4.c b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
index fc872dd35029..18055807a4f5 100644
--- a/drivers/pci/controller/dwc/pcie-rcar-gen4.c
+++ b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
@@ -87,7 +87,7 @@ struct rcar_gen4_pcie {
#define to_rcar_gen4_pcie(_dw) container_of(_dw, struct rcar_gen4_pcie, dw)
/* Common */
-static int rcar_gen4_pcie_link_up(struct dw_pcie *dw)
+static bool rcar_gen4_pcie_link_up(struct dw_pcie *dw)
{
struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
u32 val, mask;
@@ -403,6 +403,7 @@ static const struct pci_epc_features rcar_gen4_pcie_epc_features = {
.msix_capable = false,
.bar[BAR_1] = { .type = BAR_RESERVED, },
.bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = 256 },
.bar[BAR_5] = { .type = BAR_RESERVED, },
.align = SZ_1M,
};
diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c
index ff986ced56b2..01794a9d3ad2 100644
--- a/drivers/pci/controller/dwc/pcie-spear13xx.c
+++ b/drivers/pci/controller/dwc/pcie-spear13xx.c
@@ -110,15 +110,12 @@ static void spear13xx_pcie_enable_interrupts(struct spear13xx_pcie *spear13xx_pc
MSI_CTRL_INT, &app_reg->int_mask);
}
-static int spear13xx_pcie_link_up(struct dw_pcie *pci)
+static bool spear13xx_pcie_link_up(struct dw_pcie *pci)
{
struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base;
- if (readl(&app_reg->app_status_1) & XMLH_LINK_UP)
- return 1;
-
- return 0;
+ return readl(&app_reg->app_status_1) & XMLH_LINK_UP;
}
static int spear13xx_pcie_host_init(struct dw_pcie_rp *pp)
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index 5103995cd6c7..4f26086f25da 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -713,7 +713,16 @@ static void init_host_aspm(struct tegra_pcie_dw *pcie)
static void init_debugfs(struct tegra_pcie_dw *pcie)
{
- debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", pcie->debugfs,
+ struct device *dev = pcie->dev;
+ char *name;
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
+ if (!name)
+ return;
+
+ pcie->debugfs = debugfs_create_dir(name, NULL);
+
+ debugfs_create_devm_seqfile(dev, "aspm_state_cnt", pcie->debugfs,
aspm_state_cnt);
}
#else
@@ -1027,12 +1036,12 @@ retry_link:
return 0;
}
-static int tegra_pcie_dw_link_up(struct dw_pcie *pci)
+static bool tegra_pcie_dw_link_up(struct dw_pcie *pci)
{
struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
- return !!(val & PCI_EXP_LNKSTA_DLLLA);
+ return val & PCI_EXP_LNKSTA_DLLLA;
}
static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)
@@ -1634,7 +1643,6 @@ static void tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
{
struct device *dev = pcie->dev;
- char *name;
int ret;
pm_runtime_enable(dev);
@@ -1664,13 +1672,6 @@ static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
goto fail_host_init;
}
- name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
- if (!name) {
- ret = -ENOMEM;
- goto fail_host_init;
- }
-
- pcie->debugfs = debugfs_create_dir(name, NULL);
init_debugfs(pcie);
return ret;
diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c
index 43b28f826edd..297e7a3d9b36 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier.c
@@ -135,7 +135,7 @@ static int uniphier_pcie_wait_rc(struct uniphier_pcie *pcie)
return 0;
}
-static int uniphier_pcie_link_up(struct dw_pcie *pci)
+static bool uniphier_pcie_link_up(struct dw_pcie *pci)
{
struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
u32 val, mask;
diff --git a/drivers/pci/controller/dwc/pcie-visconti.c b/drivers/pci/controller/dwc/pcie-visconti.c
index 318c278e65c8..cdeac6177143 100644
--- a/drivers/pci/controller/dwc/pcie-visconti.c
+++ b/drivers/pci/controller/dwc/pcie-visconti.c
@@ -121,13 +121,13 @@ static u32 visconti_mpu_readl(struct visconti_pcie *pcie, u32 reg)
return readl_relaxed(pcie->mpu_base + reg);
}
-static int visconti_pcie_link_up(struct dw_pcie *pci)
+static bool visconti_pcie_link_up(struct dw_pcie *pci)
{
struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
void __iomem *addr = pcie->ulreg_base;
u32 val = readl_relaxed(addr + PCIE_UL_REG_V_PHY_ST_02);
- return !!(val & PCIE_UL_S_L0);
+ return val & PCIE_UL_S_L0;
}
static int visconti_pcie_start_link(struct dw_pcie *pci)
diff --git a/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
index 5af22bee913b..4919b27eaf44 100644
--- a/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
+++ b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
@@ -53,18 +53,13 @@ static inline void ls_g4_pcie_pf_writel(struct ls_g4_pcie *pcie,
iowrite32(val, pcie->pci.csr_axi_slave_base + PCIE_PF_OFF + off);
}
-static int ls_g4_pcie_link_up(struct mobiveil_pcie *pci)
+static bool ls_g4_pcie_link_up(struct mobiveil_pcie *pci)
{
struct ls_g4_pcie *pcie = to_ls_g4_pcie(pci);
u32 state;
state = ls_g4_pcie_pf_readl(pcie, PCIE_PF_DBG);
- state = state & PF_DBG_LTSSM_MASK;
-
- if (state == PF_DBG_LTSSM_L0)
- return 1;
-
- return 0;
+ return (state & PF_DBG_LTSSM_MASK) == PF_DBG_LTSSM_L0;
}
static void ls_g4_pcie_disable_interrupt(struct ls_g4_pcie *pcie)
@@ -174,8 +169,7 @@ static int ls_g4_pcie_interrupt_init(struct mobiveil_pcie *mv_pci)
static void ls_g4_pcie_reset(struct work_struct *work)
{
- struct delayed_work *dwork = container_of(work, struct delayed_work,
- work);
+ struct delayed_work *dwork = to_delayed_work(work);
struct ls_g4_pcie *pcie = container_of(dwork, struct ls_g4_pcie, dwork);
struct mobiveil_pcie *mv_pci = &pcie->pci;
u16 ctrl;
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil.h b/drivers/pci/controller/mobiveil/pcie-mobiveil.h
index e63abb887ee3..662f17f9bf65 100644
--- a/drivers/pci/controller/mobiveil/pcie-mobiveil.h
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil.h
@@ -160,7 +160,7 @@ struct mobiveil_root_port {
};
struct mobiveil_pab_ops {
- int (*link_up)(struct mobiveil_pcie *pcie);
+ bool (*link_up)(struct mobiveil_pcie *pcie);
};
struct mobiveil_pcie {
diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c
index f441bfd6f96a..b0992325dd65 100644
--- a/drivers/pci/controller/pci-host-common.c
+++ b/drivers/pci/controller/pci-host-common.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Generic PCI host driver common code
+ * Common library for PCI host controller drivers
*
* Copyright (C) 2014 ARM Limited
*
@@ -15,6 +15,8 @@
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
+#include "pci-host-common.h"
+
static void gen_pci_unmap_cfg(void *ptr)
{
pci_ecam_free((struct pci_config_window *)ptr);
@@ -49,23 +51,17 @@ static struct pci_config_window *gen_pci_init(struct device *dev,
return cfg;
}
-int pci_host_common_probe(struct platform_device *pdev)
+int pci_host_common_init(struct platform_device *pdev,
+ const struct pci_ecam_ops *ops)
{
struct device *dev = &pdev->dev;
struct pci_host_bridge *bridge;
struct pci_config_window *cfg;
- const struct pci_ecam_ops *ops;
-
- ops = of_device_get_match_data(&pdev->dev);
- if (!ops)
- return -ENODEV;
bridge = devm_pci_alloc_host_bridge(dev, 0);
if (!bridge)
return -ENOMEM;
- platform_set_drvdata(pdev, bridge);
-
of_pci_check_probe_only();
/* Parse and map our Configuration Space windows */
@@ -73,6 +69,8 @@ int pci_host_common_probe(struct platform_device *pdev)
if (IS_ERR(cfg))
return PTR_ERR(cfg);
+ platform_set_drvdata(pdev, bridge);
+
bridge->sysdata = cfg;
bridge->ops = (struct pci_ops *)&ops->pci_ops;
bridge->enable_device = ops->enable_device;
@@ -81,6 +79,18 @@ int pci_host_common_probe(struct platform_device *pdev)
return pci_host_probe(bridge);
}
+EXPORT_SYMBOL_GPL(pci_host_common_init);
+
+int pci_host_common_probe(struct platform_device *pdev)
+{
+ const struct pci_ecam_ops *ops;
+
+ ops = of_device_get_match_data(&pdev->dev);
+ if (!ops)
+ return -ENODEV;
+
+ return pci_host_common_init(pdev, ops);
+}
EXPORT_SYMBOL_GPL(pci_host_common_probe);
void pci_host_common_remove(struct platform_device *pdev)
@@ -94,5 +104,5 @@ void pci_host_common_remove(struct platform_device *pdev)
}
EXPORT_SYMBOL_GPL(pci_host_common_remove);
-MODULE_DESCRIPTION("Generic PCI host common driver");
+MODULE_DESCRIPTION("Common library for PCI host controller drivers");
MODULE_LICENSE("GPL v2");
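With probe split from init, a host driver that knows its ECAM ops at compile time can bypass the OF match-data lookup. A minimal hypothetical caller (pci_generic_ecam_ops is the stock ECAM ops exported by <linux/pci-ecam.h>):

#include <linux/pci-ecam.h>
#include <linux/platform_device.h>

#include "pci-host-common.h"

static int demo_ecam_probe(struct platform_device *pdev)
{
	return pci_host_common_init(pdev, &pci_generic_ecam_ops);
}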
diff --git a/drivers/pci/controller/pci-host-common.h b/drivers/pci/controller/pci-host-common.h
new file mode 100644
index 000000000000..65bd9e032353
--- /dev/null
+++ b/drivers/pci/controller/pci-host-common.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common library for PCI host controller drivers
+ *
+ * Copyright (C) 2014 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#ifndef _PCI_HOST_COMMON_H
+#define _PCI_HOST_COMMON_H
+
+struct pci_ecam_ops;
+
+int pci_host_common_probe(struct platform_device *pdev);
+int pci_host_common_init(struct platform_device *pdev,
+ const struct pci_ecam_ops *ops);
+void pci_host_common_remove(struct platform_device *pdev);
+
+#endif
diff --git a/drivers/pci/controller/pci-host-generic.c b/drivers/pci/controller/pci-host-generic.c
index 4051b9b61dac..c1bc0d34348f 100644
--- a/drivers/pci/controller/pci-host-generic.c
+++ b/drivers/pci/controller/pci-host-generic.c
@@ -14,6 +14,8 @@
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
+#include "pci-host-common.h"
+
static const struct pci_ecam_ops gen_pci_cfg_cam_bus_ops = {
.bus_shift = 16,
.pci_ops = {
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index e1eaa24559a2..ef5d655a0052 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -50,6 +50,7 @@
#include <linux/irqdomain.h>
#include <linux/acpi.h>
#include <linux/sizes.h>
+#include <linux/of_irq.h>
#include <asm/mshyperv.h>
/*
@@ -309,8 +310,6 @@ struct pci_packet {
void (*completion_func)(void *context, struct pci_response *resp,
int resp_packet_size);
void *compl_ctxt;
-
- struct pci_message message[];
};
/*
@@ -817,9 +816,17 @@ static int hv_pci_vec_irq_gic_domain_alloc(struct irq_domain *domain,
int ret;
fwspec.fwnode = domain->parent->fwnode;
- fwspec.param_count = 2;
- fwspec.param[0] = hwirq;
- fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+ if (is_of_node(fwspec.fwnode)) {
+ /* SPI lines for OF translations start at offset 32 */
+ fwspec.param_count = 3;
+ fwspec.param[0] = 0;
+ fwspec.param[1] = hwirq - 32;
+ fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
+ } else {
+ fwspec.param_count = 2;
+ fwspec.param[0] = hwirq;
+ fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+ }
ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
if (ret)
@@ -887,10 +894,44 @@ static const struct irq_domain_ops hv_pci_domain_ops = {
.activate = hv_pci_vec_irq_domain_activate,
};
+#ifdef CONFIG_OF
+
+static struct irq_domain *hv_pci_of_irq_domain_parent(void)
+{
+ struct device_node *parent;
+ struct irq_domain *domain;
+
+ parent = of_irq_find_parent(hv_get_vmbus_root_device()->of_node);
+ if (!parent)
+ return NULL;
+ domain = irq_find_host(parent);
+ of_node_put(parent);
+
+ return domain;
+}
+
+#endif
+
+#ifdef CONFIG_ACPI
+
+static struct irq_domain *hv_pci_acpi_irq_domain_parent(void)
+{
+ acpi_gsi_domain_disp_fn gsi_domain_disp_fn;
+
+ gsi_domain_disp_fn = acpi_get_gsi_dispatcher();
+ if (!gsi_domain_disp_fn)
+ return NULL;
+ return irq_find_matching_fwnode(gsi_domain_disp_fn(0),
+ DOMAIN_BUS_ANY);
+}
+
+#endif
+
static int hv_pci_irqchip_init(void)
{
static struct hv_pci_chip_data *chip_data;
struct fwnode_handle *fn = NULL;
+ struct irq_domain *irq_domain_parent = NULL;
int ret = -ENOMEM;
chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL);
@@ -907,9 +948,24 @@ static int hv_pci_irqchip_init(void)
* way to ensure that all the corresponding devices are also gone and
* no interrupts will be generated.
*/
- hv_msi_gic_irq_domain = acpi_irq_create_hierarchy(0, HV_PCI_MSI_SPI_NR,
- fn, &hv_pci_domain_ops,
- chip_data);
+#ifdef CONFIG_ACPI
+ if (!acpi_disabled)
+ irq_domain_parent = hv_pci_acpi_irq_domain_parent();
+#endif
+#ifdef CONFIG_OF
+ if (!irq_domain_parent)
+ irq_domain_parent = hv_pci_of_irq_domain_parent();
+#endif
+ if (!irq_domain_parent) {
+ WARN_ONCE(1, "Invalid firmware configuration for VMBus interrupts\n");
+ ret = -EINVAL;
+ goto free_chip;
+ }
+
+ hv_msi_gic_irq_domain = irq_domain_create_hierarchy(irq_domain_parent, 0,
+ HV_PCI_MSI_SPI_NR,
+ fn, &hv_pci_domain_ops,
+ chip_data);
if (!hv_msi_gic_irq_domain) {
pr_err("Failed to create Hyper-V arm64 vPCI MSI IRQ domain\n");
@@ -1438,7 +1494,7 @@ static int hv_read_config_block(struct pci_dev *pdev, void *buf,
memset(&pkt, 0, sizeof(pkt));
pkt.pkt.completion_func = hv_pci_read_config_compl;
pkt.pkt.compl_ctxt = &comp_pkt;
- read_blk = (struct pci_read_block *)&pkt.pkt.message;
+ read_blk = (struct pci_read_block *)pkt.buf;
read_blk->message_type.type = PCI_READ_BLOCK;
read_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
read_blk->block_id = block_id;
@@ -1518,7 +1574,7 @@ static int hv_write_config_block(struct pci_dev *pdev, void *buf,
memset(&pkt, 0, sizeof(pkt));
pkt.pkt.completion_func = hv_pci_write_config_compl;
pkt.pkt.compl_ctxt = &comp_pkt;
- write_blk = (struct pci_write_block *)&pkt.pkt.message;
+ write_blk = (struct pci_write_block *)pkt.buf;
write_blk->message_type.type = PCI_WRITE_BLOCK;
write_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
write_blk->block_id = block_id;
@@ -1599,7 +1655,7 @@ static void hv_int_desc_free(struct hv_pci_dev *hpdev,
return;
}
memset(&ctxt, 0, sizeof(ctxt));
- int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message;
+ int_pkt = (struct pci_delete_interrupt *)ctxt.buffer;
int_pkt->message_type.type =
PCI_DELETE_INTERRUPT_MESSAGE;
int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
@@ -2482,7 +2538,7 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
comp_pkt.hpdev = hpdev;
pkt.init_packet.compl_ctxt = &comp_pkt;
pkt.init_packet.completion_func = q_resource_requirements;
- res_req = (struct pci_child_message *)&pkt.init_packet.message;
+ res_req = (struct pci_child_message *)pkt.buffer;
res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS;
res_req->wslot.slot = desc->win_slot.slot;
@@ -2860,7 +2916,7 @@ static void hv_eject_device_work(struct work_struct *work)
pci_destroy_slot(hpdev->pci_slot);
memset(&ctxt, 0, sizeof(ctxt));
- ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
+ ejct_pkt = (struct pci_eject_response *)ctxt.buffer;
ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
vmbus_sendpacket(hbus->hdev->channel, ejct_pkt,
@@ -3118,7 +3174,7 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev,
init_completion(&comp_pkt.host_event);
pkt->completion_func = hv_pci_generic_compl;
pkt->compl_ctxt = &comp_pkt;
- version_req = (struct pci_version_request *)&pkt->message;
+ version_req = (struct pci_version_request *)(pkt + 1);
version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION;
for (i = 0; i < num_version; i++) {
@@ -3340,7 +3396,7 @@ enter_d0_retry:
init_completion(&comp_pkt.host_event);
pkt->completion_func = hv_pci_generic_compl;
pkt->compl_ctxt = &comp_pkt;
- d0_entry = (struct pci_bus_d0_entry *)&pkt->message;
+ d0_entry = (struct pci_bus_d0_entry *)(pkt + 1);
d0_entry->message_type.type = PCI_BUS_D0ENTRY;
d0_entry->mmio_base = hbus->mem_config->start;
@@ -3498,20 +3554,20 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
if (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2) {
res_assigned =
- (struct pci_resources_assigned *)&pkt->message;
+ (struct pci_resources_assigned *)(pkt + 1);
res_assigned->message_type.type =
PCI_RESOURCES_ASSIGNED;
res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
} else {
res_assigned2 =
- (struct pci_resources_assigned2 *)&pkt->message;
+ (struct pci_resources_assigned2 *)(pkt + 1);
res_assigned2->message_type.type =
PCI_RESOURCES_ASSIGNED2;
res_assigned2->wslot.slot = hpdev->desc.win_slot.slot;
}
put_pcichild(hpdev);
- ret = vmbus_sendpacket(hdev->channel, &pkt->message,
+ ret = vmbus_sendpacket(hdev->channel, pkt + 1,
size_res, (unsigned long)pkt,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
@@ -3809,6 +3865,7 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
struct pci_packet teardown_packet;
u8 buffer[sizeof(struct pci_message)];
} pkt;
+ struct pci_message *msg;
struct hv_pci_compl comp_pkt;
struct hv_pci_dev *hpdev, *tmp;
unsigned long flags;
@@ -3854,10 +3911,10 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
init_completion(&comp_pkt.host_event);
pkt.teardown_packet.completion_func = hv_pci_generic_compl;
pkt.teardown_packet.compl_ctxt = &comp_pkt;
- pkt.teardown_packet.message[0].type = PCI_BUS_D0EXIT;
+ msg = (struct pci_message *)pkt.buffer;
+ msg->type = PCI_BUS_D0EXIT;
- ret = vmbus_sendpacket_getid(chan, &pkt.teardown_packet.message,
- sizeof(struct pci_message),
+ ret = vmbus_sendpacket_getid(chan, msg, sizeof(*msg),
(unsigned long)&pkt.teardown_packet,
&trans_id, VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
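
Dropping the message[] flexible array member from struct pci_packet avoids embedding a flex-array struct inside larger on-stack objects, where the layout is undefined; the payload now lives in an explicit buffer directly after the header and is reached by a cast. A sketch of the resulting pattern, mirroring the hunks above:

    struct {
        struct pci_packet pkt;    /* header: completion callback + context */
        u8 buffer[sizeof(struct pci_message)];
    } ctxt;

    /* payload sits right after the header; the (pkt + 1) casts rely on this */
    struct pci_message *msg = (struct pci_message *)ctxt.buffer;

    msg->type = PCI_BUS_D0EXIT;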
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index 60da24ba0a19..a4a2bac4f4b2 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -1179,37 +1179,29 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
unsigned int *tgt,
unsigned int *attr)
{
- const int na = 3, ns = 2;
- const __be32 *range;
- int rlen, nranges, rangesz, pna, i;
+ struct of_range range;
+ struct of_range_parser parser;
*tgt = -1;
*attr = -1;
- range = of_get_property(np, "ranges", &rlen);
- if (!range)
+ if (of_pci_range_parser_init(&parser, np))
return -EINVAL;
- pna = of_n_addr_cells(np);
- rangesz = pna + na + ns;
- nranges = rlen / sizeof(__be32) / rangesz;
-
- for (i = 0; i < nranges; i++, range += rangesz) {
- u32 flags = of_read_number(range, 1);
- u32 slot = of_read_number(range + 1, 1);
- u64 cpuaddr = of_read_number(range + na, pna);
+ for_each_of_range(&parser, &range) {
unsigned long rtype;
+ u32 slot = upper_32_bits(range.bus_addr);
- if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_IO)
+ if (DT_FLAGS_TO_TYPE(range.flags) == DT_TYPE_IO)
rtype = IORESOURCE_IO;
- else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32)
+ else if (DT_FLAGS_TO_TYPE(range.flags) == DT_TYPE_MEM32)
rtype = IORESOURCE_MEM;
else
continue;
if (slot == PCI_SLOT(devfn) && type == rtype) {
- *tgt = DT_CPUADDR_TO_TARGET(cpuaddr);
- *attr = DT_CPUADDR_TO_ATTR(cpuaddr);
+ *tgt = DT_CPUADDR_TO_TARGET(range.cpu_addr);
+ *attr = DT_CPUADDR_TO_ATTR(range.cpu_addr);
return 0;
}
}
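
The conversion above replaces a hand-rolled walk of the raw "ranges" property with the generic range parser, which decodes #address-cells/#size-cells itself. The general shape of the idiom, as a sketch:

    struct of_range_parser parser;
    struct of_range range;

    if (of_pci_range_parser_init(&parser, np))
        return -EINVAL;

    for_each_of_range(&parser, &range) {
        /*
         * range.flags, range.bus_addr, range.cpu_addr and range.size
         * arrive already decoded for the node's cell counts.
         */
    }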
diff --git a/drivers/pci/controller/pci-thunder-ecam.c b/drivers/pci/controller/pci-thunder-ecam.c
index 08161065a89c..b5b4a958e6a2 100644
--- a/drivers/pci/controller/pci-thunder-ecam.c
+++ b/drivers/pci/controller/pci-thunder-ecam.c
@@ -11,6 +11,8 @@
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
+#include "pci-host-common.h"
+
#if defined(CONFIG_PCI_HOST_THUNDER_ECAM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
static void set_val(u32 v, int where, int size, u32 *val)
diff --git a/drivers/pci/controller/pci-thunder-pem.c b/drivers/pci/controller/pci-thunder-pem.c
index f1bd5de67997..5fa037fb61dc 100644
--- a/drivers/pci/controller/pci-thunder-pem.c
+++ b/drivers/pci/controller/pci-thunder-pem.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include "../pci.h"
+#include "pci-host-common.h"
#if defined(CONFIG_PCI_HOST_THUNDER_PEM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c
index 3d412a931774..77fe73976654 100644
--- a/drivers/pci/controller/pcie-apple.c
+++ b/drivers/pci/controller/pcie-apple.c
@@ -18,6 +18,7 @@
* Author: Marc Zyngier <maz@kernel.org>
*/
+#include <linux/bitfield.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/iopoll.h>
@@ -30,6 +31,9 @@
#include <linux/of_irq.h>
#include <linux/pci-ecam.h>
+#include "pci-host-common.h"
+
+/* T8103 (original M1) and related SoCs */
#define CORE_RC_PHYIF_CTL 0x00024
#define CORE_RC_PHYIF_CTL_RUN BIT(0)
#define CORE_RC_PHYIF_STAT 0x00028
@@ -40,14 +44,18 @@
#define CORE_RC_STAT_READY BIT(0)
#define CORE_FABRIC_STAT 0x04000
#define CORE_FABRIC_STAT_MASK 0x001F001F
-#define CORE_LANE_CFG(port) (0x84000 + 0x4000 * (port))
-#define CORE_LANE_CFG_REFCLK0REQ BIT(0)
-#define CORE_LANE_CFG_REFCLK1REQ BIT(1)
-#define CORE_LANE_CFG_REFCLK0ACK BIT(2)
-#define CORE_LANE_CFG_REFCLK1ACK BIT(3)
-#define CORE_LANE_CFG_REFCLKEN (BIT(9) | BIT(10))
-#define CORE_LANE_CTL(port) (0x84004 + 0x4000 * (port))
-#define CORE_LANE_CTL_CFGACC BIT(15)
+
+#define CORE_PHY_DEFAULT_BASE(port) (0x84000 + 0x4000 * (port))
+
+#define PHY_LANE_CFG 0x00000
+#define PHY_LANE_CFG_REFCLK0REQ BIT(0)
+#define PHY_LANE_CFG_REFCLK1REQ BIT(1)
+#define PHY_LANE_CFG_REFCLK0ACK BIT(2)
+#define PHY_LANE_CFG_REFCLK1ACK BIT(3)
+#define PHY_LANE_CFG_REFCLKEN (BIT(9) | BIT(10))
+#define PHY_LANE_CFG_REFCLKCGEN (BIT(30) | BIT(31))
+#define PHY_LANE_CTL 0x00004
+#define PHY_LANE_CTL_CFGACC BIT(15)
#define PORT_LTSSMCTL 0x00080
#define PORT_LTSSMCTL_START BIT(0)
@@ -101,7 +109,7 @@
#define PORT_REFCLK_CGDIS BIT(8)
#define PORT_PERST 0x00814
#define PORT_PERST_OFF BIT(0)
-#define PORT_RID2SID(i16) (0x00828 + 4 * (i16))
+#define PORT_RID2SID 0x00828
#define PORT_RID2SID_VALID BIT(31)
#define PORT_RID2SID_SID_SHIFT 16
#define PORT_RID2SID_BUS_SHIFT 8
@@ -119,7 +127,15 @@
#define PORT_TUNSTAT_PERST_ACK_PEND BIT(1)
#define PORT_PREFMEM_ENABLE 0x00994
-#define MAX_RID2SID 64
+/* T602x (M2-pro and co) */
+#define PORT_T602X_MSIADDR 0x016c
+#define PORT_T602X_MSIADDR_HI 0x0170
+#define PORT_T602X_PERST 0x082c
+#define PORT_T602X_RID2SID 0x3000
+#define PORT_T602X_MSIMAP 0x3800
+
+#define PORT_MSIMAP_ENABLE BIT(31)
+#define PORT_MSIMAP_TARGET GENMASK(7, 0)
/*
* The doorbell address is set to 0xfffff000, which by convention
@@ -130,10 +146,45 @@
*/
#define DOORBELL_ADDR CONFIG_PCIE_APPLE_MSI_DOORBELL_ADDR
+struct hw_info {
+ u32 phy_lane_ctl;
+ u32 port_msiaddr;
+ u32 port_msiaddr_hi;
+ u32 port_refclk;
+ u32 port_perst;
+ u32 port_rid2sid;
+ u32 port_msimap;
+ u32 max_rid2sid;
+};
+
+static const struct hw_info t8103_hw = {
+ .phy_lane_ctl = PHY_LANE_CTL,
+ .port_msiaddr = PORT_MSIADDR,
+ .port_msiaddr_hi = 0,
+ .port_refclk = PORT_REFCLK,
+ .port_perst = PORT_PERST,
+ .port_rid2sid = PORT_RID2SID,
+ .port_msimap = 0,
+ .max_rid2sid = 64,
+};
+
+static const struct hw_info t602x_hw = {
+ .phy_lane_ctl = 0,
+ .port_msiaddr = PORT_T602X_MSIADDR,
+ .port_msiaddr_hi = PORT_T602X_MSIADDR_HI,
+ .port_refclk = 0,
+ .port_perst = PORT_T602X_PERST,
+ .port_rid2sid = PORT_T602X_RID2SID,
+ .port_msimap = PORT_T602X_MSIMAP,
+	/* 16 on t602x; oversized as a guess to allow autodetection on future HW */
+ .max_rid2sid = 512,
+};
+
struct apple_pcie {
struct mutex lock;
struct device *dev;
void __iomem *base;
+ const struct hw_info *hw;
unsigned long *bitmap;
struct list_head ports;
struct completion event;
@@ -142,12 +193,14 @@ struct apple_pcie {
};
struct apple_pcie_port {
+ raw_spinlock_t lock;
struct apple_pcie *pcie;
struct device_node *np;
void __iomem *base;
+ void __iomem *phy;
struct irq_domain *domain;
struct list_head entry;
- DECLARE_BITMAP(sid_map, MAX_RID2SID);
+ unsigned long *sid_map;
int sid_map_sz;
int idx;
};
@@ -233,14 +286,16 @@ static void apple_port_irq_mask(struct irq_data *data)
{
struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);
- writel_relaxed(BIT(data->hwirq), port->base + PORT_INTMSKSET);
+ guard(raw_spinlock_irqsave)(&port->lock);
+ rmw_set(BIT(data->hwirq), port->base + PORT_INTMSK);
}
static void apple_port_irq_unmask(struct irq_data *data)
{
struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);
- writel_relaxed(BIT(data->hwirq), port->base + PORT_INTMSKCLR);
+ guard(raw_spinlock_irqsave)(&port->lock);
+ rmw_clear(BIT(data->hwirq), port->base + PORT_INTMSK);
}
static bool hwirq_is_intx(unsigned int hwirq)
@@ -344,7 +399,9 @@ static void apple_port_irq_handler(struct irq_desc *desc)
static int apple_pcie_port_setup_irq(struct apple_pcie_port *port)
{
struct fwnode_handle *fwnode = &port->np->fwnode;
+ struct apple_pcie *pcie = port->pcie;
unsigned int irq;
+ u32 val = 0;
/* FIXME: consider moving each interrupt under each port */
irq = irq_of_parse_and_map(to_of_node(dev_fwnode(port->pcie->dev)),
@@ -359,20 +416,31 @@ static int apple_pcie_port_setup_irq(struct apple_pcie_port *port)
return -ENOMEM;
/* Disable all interrupts */
- writel_relaxed(~0, port->base + PORT_INTMSKSET);
+ writel_relaxed(~0, port->base + PORT_INTMSK);
writel_relaxed(~0, port->base + PORT_INTSTAT);
+ writel_relaxed(~0, port->base + PORT_LINKCMDSTS);
irq_set_chained_handler_and_data(irq, apple_port_irq_handler, port);
/* Configure MSI base address */
BUILD_BUG_ON(upper_32_bits(DOORBELL_ADDR));
- writel_relaxed(lower_32_bits(DOORBELL_ADDR), port->base + PORT_MSIADDR);
+ writel_relaxed(lower_32_bits(DOORBELL_ADDR),
+ port->base + pcie->hw->port_msiaddr);
+ if (pcie->hw->port_msiaddr_hi)
+ writel_relaxed(0, port->base + pcie->hw->port_msiaddr_hi);
/* Enable MSIs, shared between all ports */
- writel_relaxed(0, port->base + PORT_MSIBASE);
- writel_relaxed((ilog2(port->pcie->nvecs) << PORT_MSICFG_L2MSINUM_SHIFT) |
- PORT_MSICFG_EN, port->base + PORT_MSICFG);
+ if (pcie->hw->port_msimap) {
+ for (int i = 0; i < pcie->nvecs; i++)
+ writel_relaxed(FIELD_PREP(PORT_MSIMAP_TARGET, i) |
+ PORT_MSIMAP_ENABLE,
+ port->base + pcie->hw->port_msimap + 4 * i);
+ } else {
+ writel_relaxed(0, port->base + PORT_MSIBASE);
+ val = ilog2(pcie->nvecs) << PORT_MSICFG_L2MSINUM_SHIFT;
+ }
+ writel_relaxed(val | PORT_MSICFG_EN, port->base + PORT_MSICFG);
return 0;
}
@@ -439,43 +507,47 @@ static int apple_pcie_setup_refclk(struct apple_pcie *pcie,
u32 stat;
int res;
- res = readl_relaxed_poll_timeout(pcie->base + CORE_RC_PHYIF_STAT, stat,
- stat & CORE_RC_PHYIF_STAT_REFCLK,
- 100, 50000);
- if (res < 0)
- return res;
+ if (pcie->hw->phy_lane_ctl)
+ rmw_set(PHY_LANE_CTL_CFGACC, port->phy + pcie->hw->phy_lane_ctl);
- rmw_set(CORE_LANE_CTL_CFGACC, pcie->base + CORE_LANE_CTL(port->idx));
- rmw_set(CORE_LANE_CFG_REFCLK0REQ, pcie->base + CORE_LANE_CFG(port->idx));
+ rmw_set(PHY_LANE_CFG_REFCLK0REQ, port->phy + PHY_LANE_CFG);
- res = readl_relaxed_poll_timeout(pcie->base + CORE_LANE_CFG(port->idx),
- stat, stat & CORE_LANE_CFG_REFCLK0ACK,
+ res = readl_relaxed_poll_timeout(port->phy + PHY_LANE_CFG,
+ stat, stat & PHY_LANE_CFG_REFCLK0ACK,
100, 50000);
if (res < 0)
return res;
- rmw_set(CORE_LANE_CFG_REFCLK1REQ, pcie->base + CORE_LANE_CFG(port->idx));
- res = readl_relaxed_poll_timeout(pcie->base + CORE_LANE_CFG(port->idx),
- stat, stat & CORE_LANE_CFG_REFCLK1ACK,
+ rmw_set(PHY_LANE_CFG_REFCLK1REQ, port->phy + PHY_LANE_CFG);
+ res = readl_relaxed_poll_timeout(port->phy + PHY_LANE_CFG,
+ stat, stat & PHY_LANE_CFG_REFCLK1ACK,
100, 50000);
if (res < 0)
return res;
- rmw_clear(CORE_LANE_CTL_CFGACC, pcie->base + CORE_LANE_CTL(port->idx));
+ if (pcie->hw->phy_lane_ctl)
+ rmw_clear(PHY_LANE_CTL_CFGACC, port->phy + pcie->hw->phy_lane_ctl);
+
+ rmw_set(PHY_LANE_CFG_REFCLKEN, port->phy + PHY_LANE_CFG);
- rmw_set(CORE_LANE_CFG_REFCLKEN, pcie->base + CORE_LANE_CFG(port->idx));
- rmw_set(PORT_REFCLK_EN, port->base + PORT_REFCLK);
+ if (pcie->hw->port_refclk)
+ rmw_set(PORT_REFCLK_EN, port->base + pcie->hw->port_refclk);
return 0;
}
+static void __iomem *port_rid2sid_addr(struct apple_pcie_port *port, int idx)
+{
+ return port->base + port->pcie->hw->port_rid2sid + 4 * idx;
+}
+
static u32 apple_pcie_rid2sid_write(struct apple_pcie_port *port,
int idx, u32 val)
{
- writel_relaxed(val, port->base + PORT_RID2SID(idx));
+ writel_relaxed(val, port_rid2sid_addr(port, idx));
/* Read back to ensure completion of the write */
- return readl_relaxed(port->base + PORT_RID2SID(idx));
+ return readl_relaxed(port_rid2sid_addr(port, idx));
}
static int apple_pcie_setup_port(struct apple_pcie *pcie,
@@ -484,6 +556,8 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
struct platform_device *platform = to_platform_device(pcie->dev);
struct apple_pcie_port *port;
struct gpio_desc *reset;
+ struct resource *res;
+ char name[16];
u32 stat, idx;
int ret, i;
@@ -496,6 +570,10 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
if (!port)
return -ENOMEM;
+ port->sid_map = devm_bitmap_zalloc(pcie->dev, pcie->hw->max_rid2sid, GFP_KERNEL);
+ if (!port->sid_map)
+ return -ENOMEM;
+
ret = of_property_read_u32_index(np, "reg", 0, &idx);
if (ret)
return ret;
@@ -505,14 +583,28 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
port->pcie = pcie;
port->np = np;
- port->base = devm_platform_ioremap_resource(platform, port->idx + 2);
+ raw_spin_lock_init(&port->lock);
+
+ snprintf(name, sizeof(name), "port%d", port->idx);
+ res = platform_get_resource_byname(platform, IORESOURCE_MEM, name);
+ if (!res)
+ res = platform_get_resource(platform, IORESOURCE_MEM, port->idx + 2);
+
+ port->base = devm_ioremap_resource(&platform->dev, res);
if (IS_ERR(port->base))
return PTR_ERR(port->base);
+ snprintf(name, sizeof(name), "phy%d", port->idx);
+ res = platform_get_resource_byname(platform, IORESOURCE_MEM, name);
+ if (res)
+ port->phy = devm_ioremap_resource(&platform->dev, res);
+ else
+ port->phy = pcie->base + CORE_PHY_DEFAULT_BASE(port->idx);
+
rmw_set(PORT_APPCLK_EN, port->base + PORT_APPCLK);
/* Assert PERST# before setting up the clock */
- gpiod_set_value(reset, 1);
+ gpiod_set_value_cansleep(reset, 1);
ret = apple_pcie_setup_refclk(pcie, port);
if (ret < 0)
@@ -522,8 +614,8 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
usleep_range(100, 200);
/* Deassert PERST# */
- rmw_set(PORT_PERST_OFF, port->base + PORT_PERST);
- gpiod_set_value(reset, 0);
+ rmw_set(PORT_PERST_OFF, port->base + pcie->hw->port_perst);
+ gpiod_set_value_cansleep(reset, 0);
/* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
msleep(100);
@@ -535,7 +627,11 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
return ret;
}
- rmw_clear(PORT_REFCLK_CGDIS, port->base + PORT_REFCLK);
+ if (pcie->hw->port_refclk)
+ rmw_clear(PORT_REFCLK_CGDIS, port->base + pcie->hw->port_refclk);
+ else
+ rmw_set(PHY_LANE_CFG_REFCLKCGEN, port->phy + PHY_LANE_CFG);
+
rmw_clear(PORT_APPCLK_CGDIS, port->base + PORT_APPCLK);
ret = apple_pcie_port_setup_irq(port);
@@ -543,7 +639,7 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
return ret;
/* Reset all RID/SID mappings, and check for RAZ/WI registers */
- for (i = 0; i < MAX_RID2SID; i++) {
+ for (i = 0; i < pcie->hw->max_rid2sid; i++) {
if (apple_pcie_rid2sid_write(port, i, 0xbad1d) != 0xbad1d)
break;
apple_pcie_rid2sid_write(port, i, 0);
@@ -556,6 +652,9 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
list_add_tail(&port->entry, &pcie->ports);
init_completion(&pcie->event);
+ /* In the success path, we keep a reference to np around */
+ of_node_get(np);
+
ret = apple_pcie_port_register_irqs(port);
WARN_ON(ret);
@@ -693,7 +792,7 @@ static void apple_pcie_disable_device(struct pci_host_bridge *bridge, struct pci
for_each_set_bit(idx, port->sid_map, port->sid_map_sz) {
u32 val;
- val = readl_relaxed(port->base + PORT_RID2SID(idx));
+ val = readl_relaxed(port_rid2sid_addr(port, idx));
if ((val & 0xffff) == rid) {
apple_pcie_rid2sid_write(port, idx, 0);
bitmap_release_region(port->sid_map, idx, 0);
@@ -707,34 +806,14 @@ static void apple_pcie_disable_device(struct pci_host_bridge *bridge, struct pci
static int apple_pcie_init(struct pci_config_window *cfg)
{
+ struct apple_pcie *pcie = cfg->priv;
struct device *dev = cfg->parent;
- struct platform_device *platform = to_platform_device(dev);
- struct apple_pcie *pcie;
int ret;
- pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
- if (!pcie)
- return -ENOMEM;
-
- pcie->dev = dev;
-
- mutex_init(&pcie->lock);
-
- pcie->base = devm_platform_ioremap_resource(platform, 1);
- if (IS_ERR(pcie->base))
- return PTR_ERR(pcie->base);
-
- cfg->priv = pcie;
- INIT_LIST_HEAD(&pcie->ports);
-
- ret = apple_msi_init(pcie);
- if (ret)
- return ret;
-
- for_each_child_of_node_scoped(dev->of_node, of_port) {
+ for_each_available_child_of_node_scoped(dev->of_node, of_port) {
ret = apple_pcie_setup_port(pcie, of_port);
if (ret) {
- dev_err(pcie->dev, "Port %pOF setup fail: %d\n", of_port, ret);
+ dev_err(dev, "Port %pOF setup fail: %d\n", of_port, ret);
return ret;
}
}
@@ -753,14 +832,44 @@ static const struct pci_ecam_ops apple_pcie_cfg_ecam_ops = {
}
};
+static int apple_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct apple_pcie *pcie;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pcie->dev = dev;
+ pcie->hw = of_device_get_match_data(dev);
+ if (!pcie->hw)
+ return -ENODEV;
+ pcie->base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(pcie->base))
+ return PTR_ERR(pcie->base);
+
+ mutex_init(&pcie->lock);
+ INIT_LIST_HEAD(&pcie->ports);
+ dev_set_drvdata(dev, pcie);
+
+ ret = apple_msi_init(pcie);
+ if (ret)
+ return ret;
+
+ return pci_host_common_init(pdev, &apple_pcie_cfg_ecam_ops);
+}
+
static const struct of_device_id apple_pcie_of_match[] = {
- { .compatible = "apple,pcie", .data = &apple_pcie_cfg_ecam_ops },
+ { .compatible = "apple,t6020-pcie", .data = &t602x_hw },
+ { .compatible = "apple,pcie", .data = &t8103_hw },
{ }
};
MODULE_DEVICE_TABLE(of, apple_pcie_of_match);
static struct platform_driver apple_pcie_driver = {
- .probe = pci_host_common_probe,
+ .probe = apple_pcie_probe,
.driver = {
.name = "pcie-apple",
.of_match_table = apple_pcie_of_match,
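
Two patterns in the pcie-apple.c changes are worth noting. First, interrupt masking switches from the write-only PORT_INTMSKSET/PORT_INTMSKCLR pair to a read-modify-write of a single PORT_INTMSK register (apparently because newer SoCs lack the set/clear pair), so a raw spinlock now serializes mask updates; guard(raw_spinlock_irqsave) takes the lock and drops it automatically on every exit path, equivalent to a raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() pair. A sketch, reusing the driver's rmw_set() helper:

    static void example_irq_mask(struct irq_data *d)
    {
        struct apple_pcie_port *port = irq_data_get_irq_chip_data(d);

        guard(raw_spinlock_irqsave)(&port->lock);  /* dropped at end of scope */
        rmw_set(BIT(d->hwirq), port->base + PORT_INTMSK);
    }

Second, the per-SoC register layout now comes from struct hw_info selected via of_device_get_match_data(), and zero-valued fields (port_refclk, phy_lane_ctl, port_msimap) double as feature flags for registers a given SoC lacks.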
diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c
index c5e0d025bc43..a8a966844cf3 100644
--- a/drivers/pci/controller/pcie-rcar-ep.c
+++ b/drivers/pci/controller/pcie-rcar-ep.c
@@ -256,15 +256,15 @@ static void rcar_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
clear_bit(atu_index + 1, ep->ib_window_map);
}
-static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn,
- u8 interrupts)
+static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 nr_irqs)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
struct rcar_pcie *pcie = &ep->pcie;
+ u8 mmc = order_base_2(nr_irqs);
u32 flags;
flags = rcar_pci_read_reg(pcie, MSICAP(fn));
- flags |= interrupts << MSICAP0_MMESCAP_OFFSET;
+ flags |= mmc << MSICAP0_MMESCAP_OFFSET;
rcar_pci_write_reg(pcie, flags, MSICAP(fn));
return 0;
@@ -280,7 +280,7 @@ static int rcar_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
if (!(flags & MSICAP0_MSIE))
return -EINVAL;
- return ((flags & MSICAP0_MMESE_MASK) >> MSICAP0_MMESE_OFFSET);
+ return 1 << ((flags & MSICAP0_MMESE_MASK) >> MSICAP0_MMESE_OFFSET);
}
static int rcar_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
index 85ea36df2f59..55416b8311dd 100644
--- a/drivers/pci/controller/pcie-rockchip-ep.c
+++ b/drivers/pci/controller/pcie-rockchip-ep.c
@@ -308,10 +308,11 @@ static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
}
static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn,
- u8 multi_msg_cap)
+ u8 nr_irqs)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
struct rockchip_pcie *rockchip = &ep->rockchip;
+ u8 mmc = order_base_2(nr_irqs);
u32 flags;
flags = rockchip_pcie_read(rockchip,
@@ -319,7 +320,7 @@ static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn,
ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK;
flags |=
- (multi_msg_cap << ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET) |
+ (mmc << ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET) |
(PCI_MSI_FLAGS_64BIT << ROCKCHIP_PCIE_EP_MSI_FLAGS_OFFSET);
flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP;
rockchip_pcie_write(rockchip, flags,
@@ -340,8 +341,8 @@ static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME))
return -EINVAL;
- return ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
- ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
+ return 1 << ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
+ ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
}
static void rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn,
@@ -694,6 +695,7 @@ static const struct pci_epc_features rockchip_pcie_epc_features = {
.linkup_notifier = true,
.msi_capable = true,
.msix_capable = false,
+ .intx_capable = true,
.align = ROCKCHIP_PCIE_AT_SIZE_ALIGN,
};
diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h
index 14954f43e5e9..5864a20323f2 100644
--- a/drivers/pci/controller/pcie-rockchip.h
+++ b/drivers/pci/controller/pcie-rockchip.h
@@ -319,11 +319,12 @@ static const char * const rockchip_pci_pm_rsts[] = {
"aclk",
};
+/* NOTE: Do not reorder the deassert sequence of the following reset pins */
static const char * const rockchip_pci_core_rsts[] = {
- "mgmt-sticky",
- "core",
- "mgmt",
"pipe",
+ "mgmt",
+ "core",
+ "mgmt-sticky",
};
struct rockchip_pcie {
diff --git a/drivers/pci/controller/plda/pcie-microchip-host.c b/drivers/pci/controller/plda/pcie-microchip-host.c
index 3fdfffdf0270..24bbf93b8051 100644
--- a/drivers/pci/controller/plda/pcie-microchip-host.c
+++ b/drivers/pci/controller/plda/pcie-microchip-host.c
@@ -23,6 +23,7 @@
#include <linux/wordpart.h>
#include "../../pci.h"
+#include "../pci-host-common.h"
#include "pcie-plda.h"
#define MC_MAX_NUM_INBOUND_WINDOWS 8
diff --git a/drivers/pci/devres.c b/drivers/pci/devres.c
index 73047316889e..9f4190501395 100644
--- a/drivers/pci/devres.c
+++ b/drivers/pci/devres.c
@@ -6,30 +6,13 @@
/*
* On the state of PCI's devres implementation:
*
- * The older devres API for PCI has two significant problems:
+ * The older PCI devres API has one significant problem:
*
- * 1. It is very strongly tied to the statically allocated mapping table in
- * struct pcim_iomap_devres below. This is mostly solved in the sense of the
- * pcim_ functions in this file providing things like ranged mapping by
- * bypassing this table, whereas the functions that were present in the old
- * API still enter the mapping addresses into the table for users of the old
- * API.
- *
- * 2. The region-request-functions in pci.c do become managed IF the device has
- * been enabled with pcim_enable_device() instead of pci_enable_device().
- * This resulted in the API becoming inconsistent: Some functions have an
- * obviously managed counter-part (e.g., pci_iomap() <-> pcim_iomap()),
- * whereas some don't and are never managed, while others don't and are
- * _sometimes_ managed (e.g. pci_request_region()).
- *
- * Consequently, in the new API, region requests performed by the pcim_
- * functions are automatically cleaned up through the devres callback
- * pcim_addr_resource_release().
- *
- * Users of pcim_enable_device() + pci_*region*() are redirected in
- * pci.c to the managed functions here in this file. This isn't exactly
- * perfect, but the only alternative way would be to port ALL drivers
- * using said combination to pcim_ functions.
+ * It is very strongly tied to the statically allocated mapping table in struct
+ * pcim_iomap_devres below. This is mostly solved in the sense of the pcim_
+ * functions in this file providing things like ranged mapping by bypassing
+ * this table, whereas the functions that were present in the old API still
+ * enter the mapping addresses into the table for users of the old API.
*
* TODO:
* Remove the legacy table entirely once all calls to pcim_iomap_table() in
@@ -87,104 +70,6 @@ static inline void pcim_addr_devres_clear(struct pcim_addr_devres *res)
res->bar = -1;
}
-/*
- * The following functions, __pcim_*_region*, exist as counterparts to the
- * versions from pci.c - which, unfortunately, can be in "hybrid mode", i.e.,
- * sometimes managed, sometimes not.
- *
- * To separate the APIs cleanly, we define our own, simplified versions here.
- */
-
-/**
- * __pcim_request_region_range - Request a ranged region
- * @pdev: PCI device the region belongs to
- * @bar: BAR the range is within
- * @offset: offset from the BAR's start address
- * @maxlen: length in bytes, beginning at @offset
- * @name: name of the driver requesting the resource
- * @req_flags: flags for the request, e.g., for kernel-exclusive requests
- *
- * Returns: 0 on success, a negative error code on failure.
- *
- * Request a range within a device's PCI BAR. Sanity check the input.
- */
-static int __pcim_request_region_range(struct pci_dev *pdev, int bar,
- unsigned long offset,
- unsigned long maxlen,
- const char *name, int req_flags)
-{
- resource_size_t start = pci_resource_start(pdev, bar);
- resource_size_t len = pci_resource_len(pdev, bar);
- unsigned long dev_flags = pci_resource_flags(pdev, bar);
-
- if (start == 0 || len == 0) /* Unused BAR. */
- return 0;
- if (len <= offset)
- return -EINVAL;
-
- start += offset;
- len -= offset;
-
- if (len > maxlen && maxlen != 0)
- len = maxlen;
-
- if (dev_flags & IORESOURCE_IO) {
- if (!request_region(start, len, name))
- return -EBUSY;
- } else if (dev_flags & IORESOURCE_MEM) {
- if (!__request_mem_region(start, len, name, req_flags))
- return -EBUSY;
- } else {
- /* That's not a device we can request anything on. */
- return -ENODEV;
- }
-
- return 0;
-}
-
-static void __pcim_release_region_range(struct pci_dev *pdev, int bar,
- unsigned long offset,
- unsigned long maxlen)
-{
- resource_size_t start = pci_resource_start(pdev, bar);
- resource_size_t len = pci_resource_len(pdev, bar);
- unsigned long flags = pci_resource_flags(pdev, bar);
-
- if (len <= offset || start == 0)
- return;
-
- if (len == 0 || maxlen == 0) /* This an unused BAR. Do nothing. */
- return;
-
- start += offset;
- len -= offset;
-
- if (len > maxlen)
- len = maxlen;
-
- if (flags & IORESOURCE_IO)
- release_region(start, len);
- else if (flags & IORESOURCE_MEM)
- release_mem_region(start, len);
-}
-
-static int __pcim_request_region(struct pci_dev *pdev, int bar,
- const char *name, int flags)
-{
- unsigned long offset = 0;
- unsigned long len = pci_resource_len(pdev, bar);
-
- return __pcim_request_region_range(pdev, bar, offset, len, name, flags);
-}
-
-static void __pcim_release_region(struct pci_dev *pdev, int bar)
-{
- unsigned long offset = 0;
- unsigned long len = pci_resource_len(pdev, bar);
-
- __pcim_release_region_range(pdev, bar, offset, len);
-}
-
static void pcim_addr_resource_release(struct device *dev, void *resource_raw)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -192,11 +77,11 @@ static void pcim_addr_resource_release(struct device *dev, void *resource_raw)
switch (res->type) {
case PCIM_ADDR_DEVRES_TYPE_REGION:
- __pcim_release_region(pdev, res->bar);
+ pci_release_region(pdev, res->bar);
break;
case PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING:
pci_iounmap(pdev, res->baseaddr);
- __pcim_release_region(pdev, res->bar);
+ pci_release_region(pdev, res->bar);
break;
case PCIM_ADDR_DEVRES_TYPE_MAPPING:
pci_iounmap(pdev, res->baseaddr);
@@ -735,7 +620,7 @@ void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
res->type = PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING;
res->bar = bar;
- ret = __pcim_request_region(pdev, bar, name, 0);
+ ret = pci_request_region(pdev, bar, name);
if (ret != 0)
goto err_region;
@@ -749,7 +634,7 @@ void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
return res->baseaddr;
err_iomap:
- __pcim_release_region(pdev, bar);
+ pci_release_region(pdev, bar);
err_region:
pcim_addr_devres_free(res);
@@ -823,8 +708,20 @@ err:
}
EXPORT_SYMBOL(pcim_iomap_regions);
-static int _pcim_request_region(struct pci_dev *pdev, int bar, const char *name,
- int request_flags)
+/**
+ * pcim_request_region - Request a PCI BAR
+ * @pdev: PCI device to request region for
+ * @bar: Index of BAR to request
+ * @name: Name of the driver requesting the resource
+ *
+ * Returns: 0 on success, a negative error code on failure.
+ *
+ * Request region specified by @bar.
+ *
+ * The region will automatically be released on driver detach. If desired,
+ * release manually only with pcim_release_region().
+ */
+int pcim_request_region(struct pci_dev *pdev, int bar, const char *name)
{
int ret;
struct pcim_addr_devres *res;
@@ -838,7 +735,7 @@ static int _pcim_request_region(struct pci_dev *pdev, int bar, const char *name,
res->type = PCIM_ADDR_DEVRES_TYPE_REGION;
res->bar = bar;
- ret = __pcim_request_region(pdev, bar, name, request_flags);
+ ret = pci_request_region(pdev, bar, name);
if (ret != 0) {
pcim_addr_devres_free(res);
return ret;
@@ -847,45 +744,9 @@ static int _pcim_request_region(struct pci_dev *pdev, int bar, const char *name,
devres_add(&pdev->dev, res);
return 0;
}
-
-/**
- * pcim_request_region - Request a PCI BAR
- * @pdev: PCI device to request region for
- * @bar: Index of BAR to request
- * @name: Name of the driver requesting the resource
- *
- * Returns: 0 on success, a negative error code on failure.
- *
- * Request region specified by @bar.
- *
- * The region will automatically be released on driver detach. If desired,
- * release manually only with pcim_release_region().
- */
-int pcim_request_region(struct pci_dev *pdev, int bar, const char *name)
-{
- return _pcim_request_region(pdev, bar, name, 0);
-}
EXPORT_SYMBOL(pcim_request_region);
/**
- * pcim_request_region_exclusive - Request a PCI BAR exclusively
- * @pdev: PCI device to request region for
- * @bar: Index of BAR to request
- * @name: Name of the driver requesting the resource
- *
- * Returns: 0 on success, a negative error code on failure.
- *
- * Request region specified by @bar exclusively.
- *
- * The region will automatically be released on driver detach. If desired,
- * release manually only with pcim_release_region().
- */
-int pcim_request_region_exclusive(struct pci_dev *pdev, int bar, const char *name)
-{
- return _pcim_request_region(pdev, bar, name, IORESOURCE_EXCLUSIVE);
-}
-
-/**
* pcim_release_region - Release a PCI BAR
* @pdev: PCI device to operate on
* @bar: Index of BAR to release
@@ -893,7 +754,7 @@ int pcim_request_region_exclusive(struct pci_dev *pdev, int bar, const char *nam
* Release a region manually that was previously requested by
* pcim_request_region().
*/
-void pcim_release_region(struct pci_dev *pdev, int bar)
+static void pcim_release_region(struct pci_dev *pdev, int bar)
{
struct pcim_addr_devres res_searched;
@@ -956,30 +817,6 @@ err:
EXPORT_SYMBOL(pcim_request_all_regions);
/**
- * pcim_iounmap_regions - Unmap and release PCI BARs (DEPRECATED)
- * @pdev: PCI device to map IO resources for
- * @mask: Mask of BARs to unmap and release
- *
- * Unmap and release regions specified by @mask.
- *
- * This function is DEPRECATED. Do not use it in new code.
- * Use pcim_iounmap_region() instead.
- */
-void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
-{
- int i;
-
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- if (!mask_contains_bar(mask, i))
- continue;
-
- pcim_iounmap_region(pdev, i);
- pcim_remove_bar_from_legacy_table(pdev, i);
- }
-}
-EXPORT_SYMBOL(pcim_iounmap_regions);
-
-/**
* pcim_iomap_range - Create a ranged __iomap mapping within a PCI BAR
* @pdev: PCI device to map IO resources for
* @bar: Index of the BAR
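
With the hybrid-mode escape hatches gone, pcim_request_region() is a plain devres wrapper over pci_request_region(). Typical managed usage in a probe path, as a sketch (my_driver is a placeholder):

    static int my_driver_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
        void __iomem *regs;
        int ret;

        ret = pcim_enable_device(pdev);
        if (ret)
            return ret;

        /* request + map BAR 0; both undone automatically on driver detach */
        regs = pcim_iomap_region(pdev, 0, "my_driver");
        if (IS_ERR(regs))
            return PTR_ERR(regs);

        return 0;
    }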
diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c
index 260b7de2dbd5..2c5e6446e00e 100644
--- a/drivers/pci/ecam.c
+++ b/drivers/pci/ecam.c
@@ -84,6 +84,8 @@ struct pci_config_window *pci_ecam_create(struct device *dev,
goto err_exit_iomap;
}
+ cfg->priv = dev_get_drvdata(dev);
+
if (ops->init) {
err = ops->init(cfg);
if (err)
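
Copying drvdata into cfg->priv before ops->init() runs is what lets a driver allocate its private state in probe and find it again from the config window; the pcie-apple conversion above relies on exactly this. A sketch, with my_pcie and my_pcie_setup_ports as placeholders:

    static int my_pcie_init(struct pci_config_window *cfg)
    {
        /* set with dev_set_drvdata() in probe, copied by pci_ecam_create() */
        struct my_pcie *pcie = cfg->priv;

        return my_pcie_setup_ports(pcie);
    }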
diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c
index 874cb097b093..e4da3fdb0007 100644
--- a/drivers/pci/endpoint/functions/pci-epf-vntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c
@@ -408,11 +408,9 @@ static void epf_ntb_config_spad_bar_free(struct epf_ntb *ntb)
*/
static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb)
{
- size_t align;
enum pci_barno barno;
struct epf_ntb_ctrl *ctrl;
u32 spad_size, ctrl_size;
- u64 size;
struct pci_epf *epf = ntb->epf;
struct device *dev = &epf->dev;
u32 spad_count;
@@ -422,31 +420,13 @@ static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb)
epf->func_no,
epf->vfunc_no);
barno = ntb->epf_ntb_bar[BAR_CONFIG];
- size = epc_features->bar[barno].fixed_size;
- align = epc_features->align;
-
- if ((!IS_ALIGNED(size, align)))
- return -EINVAL;
-
spad_count = ntb->spad_count;
- ctrl_size = sizeof(struct epf_ntb_ctrl);
+ ctrl_size = ALIGN(sizeof(struct epf_ntb_ctrl), sizeof(u32));
spad_size = 2 * spad_count * sizeof(u32);
- if (!align) {
- ctrl_size = roundup_pow_of_two(ctrl_size);
- spad_size = roundup_pow_of_two(spad_size);
- } else {
- ctrl_size = ALIGN(ctrl_size, align);
- spad_size = ALIGN(spad_size, align);
- }
-
- if (!size)
- size = ctrl_size + spad_size;
- else if (size < ctrl_size + spad_size)
- return -EINVAL;
-
- base = pci_epf_alloc_space(epf, size, barno, epc_features, 0);
+ base = pci_epf_alloc_space(epf, ctrl_size + spad_size,
+ barno, epc_features, 0);
if (!base) {
dev_err(dev, "Config/Status/SPAD alloc region fail\n");
return -ENOMEM;
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index beabea00af91..ca7f19cc973a 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -293,8 +293,6 @@ int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
if (interrupt < 0)
return 0;
- interrupt = 1 << interrupt;
-
return interrupt;
}
EXPORT_SYMBOL_GPL(pci_epc_get_msi);
@@ -304,28 +302,25 @@ EXPORT_SYMBOL_GPL(pci_epc_get_msi);
* @epc: the EPC device on which MSI has to be configured
* @func_no: the physical endpoint function number in the EPC device
* @vfunc_no: the virtual endpoint function number in the physical function
- * @interrupts: number of MSI interrupts required by the EPF
+ * @nr_irqs: number of MSI interrupts required by the EPF
*
* Invoke to set the required number of MSI interrupts.
*/
-int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 interrupts)
+int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 nr_irqs)
{
int ret;
- u8 encode_int;
if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
return -EINVAL;
- if (interrupts < 1 || interrupts > 32)
+ if (nr_irqs < 1 || nr_irqs > 32)
return -EINVAL;
if (!epc->ops->set_msi)
return 0;
- encode_int = order_base_2(interrupts);
-
mutex_lock(&epc->lock);
- ret = epc->ops->set_msi(epc, func_no, vfunc_no, encode_int);
+ ret = epc->ops->set_msi(epc, func_no, vfunc_no, nr_irqs);
mutex_unlock(&epc->lock);
return ret;
@@ -357,7 +352,7 @@ int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
if (interrupt < 0)
return 0;
- return interrupt + 1;
+ return interrupt;
}
EXPORT_SYMBOL_GPL(pci_epc_get_msix);
@@ -366,29 +361,28 @@ EXPORT_SYMBOL_GPL(pci_epc_get_msix);
* @epc: the EPC device on which MSI-X has to be configured
* @func_no: the physical endpoint function number in the EPC device
* @vfunc_no: the virtual endpoint function number in the physical function
- * @interrupts: number of MSI-X interrupts required by the EPF
+ * @nr_irqs: number of MSI-X interrupts required by the EPF
* @bir: BAR where the MSI-X table resides
* @offset: Offset pointing to the start of MSI-X table
*
* Invoke to set the required number of MSI-X interrupts.
*/
-int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
- u16 interrupts, enum pci_barno bir, u32 offset)
+int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u16 nr_irqs,
+ enum pci_barno bir, u32 offset)
{
int ret;
if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
return -EINVAL;
- if (interrupts < 1 || interrupts > 2048)
+ if (nr_irqs < 1 || nr_irqs > 2048)
return -EINVAL;
if (!epc->ops->set_msix)
return 0;
mutex_lock(&epc->lock);
- ret = epc->ops->set_msix(epc, func_no, vfunc_no, interrupts - 1, bir,
- offset);
+ ret = epc->ops->set_msix(epc, func_no, vfunc_no, nr_irqs, bir, offset);
mutex_unlock(&epc->lock);
return ret;
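
The EPC API now deals in raw vector counts and leaves the log2 encoding of the MSI Multiple Message Capable field to the controller drivers (see the rcar and rockchip hunks above). A worked example of the encoding:

    u8 nr_irqs = 8;
    u8 mmc = order_base_2(nr_irqs);    /* 8 vectors -> MMC field = 3 */

    /* get side: an MME field of 3 decodes back to 1 << 3 = 8 vectors */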
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 394395c7f8de..577a9e490115 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -236,12 +236,13 @@ void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar,
}
dev = epc->dev.parent;
- dma_free_coherent(dev, epf_bar[bar].size, addr,
+ dma_free_coherent(dev, epf_bar[bar].aligned_size, addr,
epf_bar[bar].phys_addr);
epf_bar[bar].phys_addr = 0;
epf_bar[bar].addr = NULL;
epf_bar[bar].size = 0;
+ epf_bar[bar].aligned_size = 0;
epf_bar[bar].barno = 0;
epf_bar[bar].flags = 0;
}
@@ -264,7 +265,7 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
enum pci_epc_interface_type type)
{
u64 bar_fixed_size = epc_features->bar[bar].fixed_size;
- size_t align = epc_features->align;
+ size_t aligned_size, align = epc_features->align;
struct pci_epf_bar *epf_bar;
dma_addr_t phys_addr;
struct pci_epc *epc;
@@ -285,12 +286,18 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
return NULL;
}
size = bar_fixed_size;
+ } else {
+ /* BAR size must be power of two */
+ size = roundup_pow_of_two(size);
}
- if (align)
- size = ALIGN(size, align);
- else
- size = roundup_pow_of_two(size);
+ /*
+ * Allocate enough memory to accommodate the iATU alignment
+ * requirement. In most cases, this will be the same as .size but
+ * it might be different if, for example, the fixed size of a BAR
+ * is smaller than align.
+ */
+ aligned_size = align ? ALIGN(size, align) : size;
if (type == PRIMARY_INTERFACE) {
epc = epf->epc;
@@ -301,7 +308,7 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
}
dev = epc->dev.parent;
- space = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL);
+ space = dma_alloc_coherent(dev, aligned_size, &phys_addr, GFP_KERNEL);
if (!space) {
dev_err(dev, "failed to allocate mem space\n");
return NULL;
@@ -310,6 +317,7 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
epf_bar[bar].phys_addr = phys_addr;
epf_bar[bar].addr = space;
epf_bar[bar].size = size;
+ epf_bar[bar].aligned_size = aligned_size;
epf_bar[bar].barno = bar;
if (upper_32_bits(size) || epc_features->bar[bar].only_64bit)
epf_bar[bar].flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
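
A BAR's advertised size must be a power of two, while the backing allocation may need to be larger to satisfy the controller's iATU alignment; the two values are now tracked separately as size and aligned_size. Worked numbers, purely illustrative:

    size_t size = SZ_4K;                       /* BAR size advertised to the host */
    size_t align = SZ_64K;                     /* controller's iATU alignment */
    size_t aligned_size = ALIGN(size, align);  /* 64 KiB actually allocated */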
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index d30f1316c98e..fadcf98a8a66 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -20,13 +20,9 @@
#include <linux/types.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
-#include <linux/pagemap.h>
#include <linux/init.h>
-#include <linux/mount.h>
-#include <linux/namei.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
-#include <linux/uaccess.h>
#include "../pci.h"
#include "cpci_hotplug.h"
@@ -492,6 +488,75 @@ void pci_hp_destroy(struct hotplug_slot *slot)
}
EXPORT_SYMBOL_GPL(pci_hp_destroy);
+static DECLARE_WAIT_QUEUE_HEAD(pci_hp_link_change_wq);
+
+/**
+ * pci_hp_ignore_link_change - begin code section causing spurious link changes
+ * @pdev: PCI hotplug bridge
+ *
+ * Mark the beginning of a code section causing spurious link changes on the
+ * Secondary Bus of @pdev, e.g. as a side effect of a Secondary Bus Reset,
+ * D3cold transition, firmware update or FPGA reconfiguration.
+ *
+ * Hotplug drivers can thus check whether such a code section is executing
+ * concurrently, await it with pci_hp_spurious_link_change() and ignore the
+ * resulting link change events.
+ *
+ * Must be paired with pci_hp_unignore_link_change(). May be called both
+ * from the PCI core and from Endpoint drivers. May be called for bridges
+ * which are not hotplug-capable, in which case it has no effect because
+ * no hotplug driver is bound to the bridge.
+ */
+void pci_hp_ignore_link_change(struct pci_dev *pdev)
+{
+ set_bit(PCI_LINK_CHANGING, &pdev->priv_flags);
+ smp_mb__after_atomic(); /* pairs with implied barrier of wait_event() */
+}
+
+/**
+ * pci_hp_unignore_link_change - end code section causing spurious link changes
+ * @pdev: PCI hotplug bridge
+ *
+ * Mark the end of a code section causing spurious link changes on the
+ * Secondary Bus of @pdev. Must be paired with pci_hp_ignore_link_change().
+ */
+void pci_hp_unignore_link_change(struct pci_dev *pdev)
+{
+ set_bit(PCI_LINK_CHANGED, &pdev->priv_flags);
+ mb(); /* ensure pci_hp_spurious_link_change() sees either bit set */
+ clear_bit(PCI_LINK_CHANGING, &pdev->priv_flags);
+ wake_up_all(&pci_hp_link_change_wq);
+}
+
+/**
+ * pci_hp_spurious_link_change - check for spurious link changes
+ * @pdev: PCI hotplug bridge
+ *
+ * Check whether a code section is executing concurrently which is causing
+ * spurious link changes on the Secondary Bus of @pdev. Await the end of the
+ * code section if so.
+ *
+ * May be called by hotplug drivers to check whether a link change is spurious
+ * and can be ignored.
+ *
+ * Because a genuine link change may have occurred in-between a spurious link
+ * change and the invocation of this function, hotplug drivers should perform
+ * sanity checks such as retrieving the current link state and bringing down
+ * the slot if the link is down.
+ *
+ * Return: %true if such a code section has been executing concurrently,
+ * otherwise %false. Also return %true if such a code section is not
+ * executing concurrently right now, but has executed at least once since
+ * the last invocation of this function.
+ */
+bool pci_hp_spurious_link_change(struct pci_dev *pdev)
+{
+ wait_event(pci_hp_link_change_wq,
+ !test_bit(PCI_LINK_CHANGING, &pdev->priv_flags));
+
+ return test_and_clear_bit(PCI_LINK_CHANGED, &pdev->priv_flags);
+}
+
static int __init pci_hotplug_init(void)
{
int result;
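
The intended calling convention, used later in this diff by pciehp_reset_slot(): the code provoking the link flap brackets itself with the ignore/unignore pair, and the hotplug interrupt handler asks whether a Data Link Layer State Changed event may be discarded. A simplified sketch of both sides:

    /* side causing the spurious link change, e.g. a reset path */
    pci_hp_ignore_link_change(bridge);
    pci_bridge_secondary_bus_reset(bridge);
    pci_hp_unignore_link_change(bridge);

    /* hotplug interrupt handler side */
    if ((events & PCI_EXP_SLTSTA_DLLSC) && pci_hp_spurious_link_change(bridge))
        events &= ~PCI_EXP_SLTSTA_DLLSC;    /* still sanity-check link state */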
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 273dd8c66f4e..debc79b0adfb 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -187,6 +187,7 @@ int pciehp_card_present(struct controller *ctrl);
int pciehp_card_present_or_link_active(struct controller *ctrl);
int pciehp_check_link_status(struct controller *ctrl);
int pciehp_check_link_active(struct controller *ctrl);
+bool pciehp_device_replaced(struct controller *ctrl);
void pciehp_release_ctrl(struct controller *ctrl);
int pciehp_sysfs_enable_slot(struct hotplug_slot *hotplug_slot);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 997841c69893..f59baa912970 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -284,35 +284,6 @@ static int pciehp_suspend(struct pcie_device *dev)
return 0;
}
-static bool pciehp_device_replaced(struct controller *ctrl)
-{
- struct pci_dev *pdev __free(pci_dev_put) = NULL;
- u32 reg;
-
- if (pci_dev_is_disconnected(ctrl->pcie->port))
- return false;
-
- pdev = pci_get_slot(ctrl->pcie->port->subordinate, PCI_DEVFN(0, 0));
- if (!pdev)
- return true;
-
- if (pci_read_config_dword(pdev, PCI_VENDOR_ID, &reg) ||
- reg != (pdev->vendor | (pdev->device << 16)) ||
- pci_read_config_dword(pdev, PCI_CLASS_REVISION, &reg) ||
- reg != (pdev->revision | (pdev->class << 8)))
- return true;
-
- if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
- (pci_read_config_dword(pdev, PCI_SUBSYSTEM_VENDOR_ID, &reg) ||
- reg != (pdev->subsystem_vendor | (pdev->subsystem_device << 16))))
- return true;
-
- if (pci_get_dsn(pdev) != ctrl->dsn)
- return true;
-
- return false;
-}
-
static int pciehp_resume_noirq(struct pcie_device *dev)
{
struct controller *ctrl = get_service_data(dev);
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index d603a7aa7483..bcc938d4420f 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -131,7 +131,7 @@ static void remove_board(struct controller *ctrl, bool safe_removal)
INDICATOR_NOOP);
/* Don't carry LBMS indications across */
- pcie_reset_lbms_count(ctrl->pcie->port);
+ pcie_reset_lbms(ctrl->pcie->port);
}
static int pciehp_enable_slot(struct controller *ctrl);
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 8a09fb6083e2..ebd342bda235 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -563,20 +563,50 @@ void pciehp_power_off_slot(struct controller *ctrl)
PCI_EXP_SLTCTL_PWR_OFF);
}
-static void pciehp_ignore_dpc_link_change(struct controller *ctrl,
- struct pci_dev *pdev, int irq)
+bool pciehp_device_replaced(struct controller *ctrl)
+{
+ struct pci_dev *pdev __free(pci_dev_put) = NULL;
+ u32 reg;
+
+ if (pci_dev_is_disconnected(ctrl->pcie->port))
+ return false;
+
+ pdev = pci_get_slot(ctrl->pcie->port->subordinate, PCI_DEVFN(0, 0));
+ if (!pdev)
+ return true;
+
+ if (pci_read_config_dword(pdev, PCI_VENDOR_ID, &reg) ||
+ reg != (pdev->vendor | (pdev->device << 16)) ||
+ pci_read_config_dword(pdev, PCI_CLASS_REVISION, &reg) ||
+ reg != (pdev->revision | (pdev->class << 8)))
+ return true;
+
+ if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
+ (pci_read_config_dword(pdev, PCI_SUBSYSTEM_VENDOR_ID, &reg) ||
+ reg != (pdev->subsystem_vendor | (pdev->subsystem_device << 16))))
+ return true;
+
+ if (pci_get_dsn(pdev) != ctrl->dsn)
+ return true;
+
+ return false;
+}
+
+static void pciehp_ignore_link_change(struct controller *ctrl,
+ struct pci_dev *pdev, int irq,
+ u16 ignored_events)
{
/*
* Ignore link changes which occurred while waiting for DPC recovery.
* Could be several if DPC triggered multiple times consecutively.
+ * Also ignore link changes caused by Secondary Bus Reset, etc.
*/
synchronize_hardirq(irq);
- atomic_and(~PCI_EXP_SLTSTA_DLLSC, &ctrl->pending_events);
+ atomic_and(~ignored_events, &ctrl->pending_events);
if (pciehp_poll_mode)
pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
- PCI_EXP_SLTSTA_DLLSC);
- ctrl_info(ctrl, "Slot(%s): Link Down/Up ignored (recovered by DPC)\n",
- slot_name(ctrl));
+ ignored_events);
+ ctrl_info(ctrl, "Slot(%s): Link Down/Up ignored\n", slot_name(ctrl));
/*
* If the link is unexpectedly down after successful recovery,
@@ -584,8 +614,8 @@ static void pciehp_ignore_dpc_link_change(struct controller *ctrl,
* Synthesize it to ensure that it is acted on.
*/
down_read_nested(&ctrl->reset_lock, ctrl->depth);
- if (!pciehp_check_link_active(ctrl))
- pciehp_request(ctrl, PCI_EXP_SLTSTA_DLLSC);
+ if (!pciehp_check_link_active(ctrl) || pciehp_device_replaced(ctrl))
+ pciehp_request(ctrl, ignored_events);
up_read(&ctrl->reset_lock);
}
@@ -732,12 +762,19 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
/*
* Ignore Link Down/Up events caused by Downstream Port Containment
- * if recovery from the error succeeded.
+ * if recovery succeeded, or caused by Secondary Bus Reset,
+ * suspend to D3cold, firmware update, FPGA reconfiguration, etc.
*/
- if ((events & PCI_EXP_SLTSTA_DLLSC) && pci_dpc_recovered(pdev) &&
+ if ((events & (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC)) &&
+ (pci_dpc_recovered(pdev) || pci_hp_spurious_link_change(pdev)) &&
ctrl->state == ON_STATE) {
- events &= ~PCI_EXP_SLTSTA_DLLSC;
- pciehp_ignore_dpc_link_change(ctrl, pdev, irq);
+ u16 ignored_events = PCI_EXP_SLTSTA_DLLSC;
+
+ if (!ctrl->inband_presence_disabled)
+ ignored_events |= events & PCI_EXP_SLTSTA_PDC;
+
+ events &= ~ignored_events;
+ pciehp_ignore_link_change(ctrl, pdev, irq, ignored_events);
}
/*
@@ -902,7 +939,6 @@ int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, bool probe)
{
struct controller *ctrl = to_ctrl(hotplug_slot);
struct pci_dev *pdev = ctrl_dev(ctrl);
- u16 stat_mask = 0, ctrl_mask = 0;
int rc;
if (probe)
@@ -910,23 +946,11 @@ int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, bool probe)
down_write_nested(&ctrl->reset_lock, ctrl->depth);
- if (!ATTN_BUTTN(ctrl)) {
- ctrl_mask |= PCI_EXP_SLTCTL_PDCE;
- stat_mask |= PCI_EXP_SLTSTA_PDC;
- }
- ctrl_mask |= PCI_EXP_SLTCTL_DLLSCE;
- stat_mask |= PCI_EXP_SLTSTA_DLLSC;
-
- pcie_write_cmd(ctrl, 0, ctrl_mask);
- ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
- pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
+ pci_hp_ignore_link_change(pdev);
rc = pci_bridge_secondary_bus_reset(ctrl->pcie->port);
- pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask);
- pcie_write_cmd_nowait(ctrl, ctrl_mask, ctrl_mask);
- ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
- pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, ctrl_mask);
+ pci_hp_unignore_link_change(pdev);
up_write(&ctrl->reset_lock);
return rc;
diff --git a/drivers/pci/iomap.c b/drivers/pci/iomap.c
index fe706ed946df..ea86c282a386 100644
--- a/drivers/pci/iomap.c
+++ b/drivers/pci/iomap.c
@@ -25,10 +25,6 @@
*
* @maxlen specifies the maximum length to map. If you want to get access to
* the complete BAR from offset to the end, pass %0 here.
- *
- * NOTE:
- * This function is never managed, even if you initialized with
- * pcim_enable_device().
* */
void __iomem *pci_iomap_range(struct pci_dev *dev,
int bar,
@@ -76,10 +72,6 @@ EXPORT_SYMBOL(pci_iomap_range);
*
* @maxlen specifies the maximum length to map. If you want to get access to
* the complete BAR from offset to the end, pass %0 here.
- *
- * NOTE:
- * This function is never managed, even if you initialized with
- * pcim_enable_device().
* */
void __iomem *pci_iomap_wc_range(struct pci_dev *dev,
int bar,
@@ -127,10 +119,6 @@ EXPORT_SYMBOL_GPL(pci_iomap_wc_range);
*
* @maxlen specifies the maximum length to map. If you want to get access to
* the complete BAR without checking for its length first, pass %0 here.
- *
- * NOTE:
- * This function is never managed, even if you initialized with
- * pcim_enable_device(). If you need automatic cleanup, use pcim_iomap().
* */
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
@@ -152,10 +140,6 @@ EXPORT_SYMBOL(pci_iomap);
*
* @maxlen specifies the maximum length to map. If you want to get access to
* the complete BAR without checking for its length first, pass %0 here.
- *
- * NOTE:
- * This function is never managed, even if you initialized with
- * pcim_enable_device().
* */
void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
{
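/*
 * A short sketch of the managed alternative that the removed NOTEs
 * pointed to: with the deprecated "hybrid" devres behavior gone,
 * pci_iomap*() are always unmanaged, and drivers wanting automatic
 * cleanup call the pcim_* APIs explicitly. example_probe() is
 * hypothetical; pcim_enable_device() and pcim_iomap() are the
 * existing managed interfaces.
 */
static int example_probe(struct pci_dev *pdev)
{
	void __iomem *regs;
	int ret;

	ret = pcim_enable_device(pdev);		/* managed enable */
	if (ret)
		return ret;

	regs = pcim_iomap(pdev, 0, 0);		/* managed mapping of BAR 0 */
	if (!regs)
		return -ENOMEM;

	return 0;	/* both are released automatically on unbind */
}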
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index ab7a8252bf41..3579265f1198 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -966,3 +966,47 @@ u32 of_pci_get_slot_power_limit(struct device_node *node,
return slot_power_limit_mw;
}
EXPORT_SYMBOL_GPL(of_pci_get_slot_power_limit);
+
+/**
+ * of_pci_get_equalization_presets - Parses the "eq-presets-Ngts" property.
+ *
+ * @dev: Device containing the properties.
+ * @presets: Pointer to store the parsed data.
+ * @num_lanes: Maximum number of lanes supported.
+ *
+ * If the property is present, read the data into the @presets structure.
+ * Otherwise, assign the default value PCI_EQ_RESV.
+ *
+ * Return: 0 if the property is not available or was parsed successfully,
+ * errno otherwise.
+ */
+int of_pci_get_equalization_presets(struct device *dev,
+ struct pci_eq_presets *presets,
+ int num_lanes)
+{
+ char name[20];
+ int ret;
+
+ presets->eq_presets_8gts[0] = PCI_EQ_RESV;
+ ret = of_property_read_u16_array(dev->of_node, "eq-presets-8gts",
+ presets->eq_presets_8gts, num_lanes);
+ if (ret && ret != -EINVAL) {
+ dev_err(dev, "Error reading eq-presets-8gts: %d\n", ret);
+ return ret;
+ }
+
+ for (int i = 0; i < EQ_PRESET_TYPE_MAX - 1; i++) {
+ presets->eq_presets_Ngts[i][0] = PCI_EQ_RESV;
+ snprintf(name, sizeof(name), "eq-presets-%dgts", 8 << (i + 1));
+ ret = of_property_read_u8_array(dev->of_node, name,
+ presets->eq_presets_Ngts[i],
+ num_lanes);
+ if (ret && ret != -EINVAL) {
+ dev_err(dev, "Error reading %s: %d\n", name, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_pci_get_equalization_presets);
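/*
 * A minimal usage sketch, assuming a hypothetical controller driver
 * with "num_lanes" data lanes; only of_pci_get_equalization_presets(),
 * struct pci_eq_presets and PCI_EQ_RESV come from this patch.
 */
static int example_apply_presets(struct device *dev, int num_lanes)
{
	struct pci_eq_presets presets;
	int ret, lane;

	ret = of_pci_get_equalization_presets(dev, &presets, num_lanes);
	if (ret)
		return ret;	/* a property was present but malformed */

	for (lane = 0; lane < num_lanes; lane++) {
		/* PCI_EQ_RESV: no DT value, keep the hardware default */
		if (presets.eq_presets_8gts[lane] == PCI_EQ_RESV)
			continue;
		/* program the 8 GT/s preset for this lane here */
	}

	return 0;
}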
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index af370628e583..b78e0e417324 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -1676,24 +1676,19 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
return NULL;
root_ops = kzalloc(sizeof(*root_ops), GFP_KERNEL);
- if (!root_ops) {
- kfree(ri);
- return NULL;
- }
+ if (!root_ops)
+ goto free_ri;
ri->cfg = pci_acpi_setup_ecam_mapping(root);
- if (!ri->cfg) {
- kfree(ri);
- kfree(root_ops);
- return NULL;
- }
+ if (!ri->cfg)
+ goto free_root_ops;
root_ops->release_info = pci_acpi_generic_release_info;
root_ops->prepare_resources = pci_acpi_root_prepare_resources;
root_ops->pci_ops = (struct pci_ops *)&ri->cfg->ops->pci_ops;
bus = acpi_pci_root_create(root, root_ops, &ri->common, ri->cfg);
if (!bus)
- return NULL;
+ goto free_cfg;
/* If we must preserve the resource configuration, claim now */
host = pci_find_host_bridge(bus);
@@ -1710,6 +1705,14 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
pcie_bus_configure_settings(child);
return bus;
+
+free_cfg:
+ pci_ecam_free(ri->cfg);
+free_root_ops:
+ kfree(root_ops);
+free_ri:
+ kfree(ri);
+ return NULL;
}
void pcibios_add_bus(struct pci_bus *bus)
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index c8bd71a739f7..67db34fd10ee 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -555,12 +555,6 @@ static void pci_pm_default_resume(struct pci_dev *pci_dev)
pci_enable_wake(pci_dev, PCI_D0, false);
}
-static void pci_pm_power_up_and_verify_state(struct pci_dev *pci_dev)
-{
- pci_power_up(pci_dev);
- pci_update_current_state(pci_dev, PCI_D0);
-}
-
static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
{
pci_pm_power_up_and_verify_state(pci_dev);
@@ -1507,7 +1501,7 @@ static int pci_bus_match(struct device *dev, const struct device_driver *drv)
struct pci_driver *pci_drv;
const struct pci_device_id *found_id;
- if (!pci_dev->match_driver)
+ if (pci_dev_binding_disallowed(pci_dev))
return 0;
pci_drv = (struct pci_driver *)to_pci_driver(drv);
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index c6cda56ca52c..268c69daa4d5 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -1475,6 +1475,9 @@ static ssize_t reset_method_store(struct device *dev,
return count;
}
+ pm_runtime_get_sync(dev);
+ struct device *pmdev __free(pm_runtime_put) = dev;
+
if (sysfs_streq(buf, "default")) {
pci_init_reset_methods(pdev);
return count;
@@ -1805,6 +1808,7 @@ const struct attribute_group *pci_dev_attr_groups[] = {
&pcie_dev_attr_group,
#ifdef CONFIG_PCIEAER
&aer_stats_attr_group,
+ &aer_attr_group,
#endif
#ifdef CONFIG_PCIEASPM
&aspm_ctrl_attr_group,
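/*
 * A minimal sketch of the scope-based cleanup pattern used above
 * (linux/cleanup.h): __free(pm_runtime_put) makes pm_runtime_put() run
 * automatically when "pmdev" goes out of scope, so every return after
 * the declaration drops the reference taken by pm_runtime_get_sync()
 * without explicit unwinding. example_show() is hypothetical.
 */
static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	pm_runtime_get_sync(dev);
	struct device *pmdev __free(pm_runtime_put) = dev;

	if (!dev_get_drvdata(dev))
		return -ENODEV;		/* pm_runtime_put(pmdev) runs here */

	return sysfs_emit(buf, "ok\n");	/* ... and here */
}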
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e77d5b53c0ce..e9448d55113b 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3192,6 +3192,12 @@ void pci_d3cold_disable(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_d3cold_disable);
+void pci_pm_power_up_and_verify_state(struct pci_dev *pci_dev)
+{
+ pci_power_up(pci_dev);
+ pci_update_current_state(pci_dev, PCI_D0);
+}
+
/**
* pci_pm_init - Initialize PM functions of given PCI device
* @dev: PCI device to handle.
@@ -3202,9 +3208,6 @@ void pci_pm_init(struct pci_dev *dev)
u16 status;
u16 pmc;
- pm_runtime_forbid(&dev->dev);
- pm_runtime_set_active(&dev->dev);
- pm_runtime_enable(&dev->dev);
device_enable_async_suspend(&dev->dev);
dev->wakeup_prepared = false;
@@ -3266,6 +3269,10 @@ void pci_pm_init(struct pci_dev *dev)
pci_read_config_word(dev, PCI_STATUS, &status);
if (status & PCI_STATUS_IMM_READY)
dev->imm_ready = 1;
+ pci_pm_power_up_and_verify_state(dev);
+ pm_runtime_forbid(&dev->dev);
+ pm_runtime_set_active(&dev->dev);
+ pm_runtime_enable(&dev->dev);
}
static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
@@ -3937,16 +3944,6 @@ void pci_release_region(struct pci_dev *pdev, int bar)
if (!pci_bar_index_is_valid(bar))
return;
- /*
- * This is done for backwards compatibility, because the old PCI devres
- * API had a mode in which the function became managed if it had been
- * enabled with pcim_enable_device() instead of pci_enable_device().
- */
- if (pci_is_managed(pdev)) {
- pcim_release_region(pdev, bar);
- return;
- }
-
if (pci_resource_len(pdev, bar) == 0)
return;
if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
@@ -3984,13 +3981,6 @@ static int __pci_request_region(struct pci_dev *pdev, int bar,
if (!pci_bar_index_is_valid(bar))
return -EINVAL;
- if (pci_is_managed(pdev)) {
- if (exclusive == IORESOURCE_EXCLUSIVE)
- return pcim_request_region_exclusive(pdev, bar, name);
-
- return pcim_request_region(pdev, bar, name);
- }
-
if (pci_resource_len(pdev, bar) == 0)
return 0;
@@ -4027,11 +4017,6 @@ err_out:
*
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
- *
- * NOTE:
- * This is a "hybrid" function: It's normally unmanaged, but becomes managed
- * when pcim_enable_device() has been called in advance. This hybrid feature is
- * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
int pci_request_region(struct pci_dev *pdev, int bar, const char *name)
{
@@ -4084,11 +4069,6 @@ err_out:
* @name: Name of the driver requesting the resources
*
* Returns: 0 on success, negative error code on failure.
- *
- * NOTE:
- * This is a "hybrid" function: It's normally unmanaged, but becomes managed
- * when pcim_enable_device() has been called in advance. This hybrid feature is
- * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
const char *name)
@@ -4104,11 +4084,6 @@ EXPORT_SYMBOL(pci_request_selected_regions);
* @name: name of the driver requesting the resources
*
* Returns: 0 on success, negative error code on failure.
- *
- * NOTE:
- * This is a "hybrid" function: It's normally unmanaged, but becomes managed
- * when pcim_enable_device() has been called in advance. This hybrid feature is
- * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
const char *name)
@@ -4144,11 +4119,6 @@ EXPORT_SYMBOL(pci_release_regions);
*
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
- *
- * NOTE:
- * This is a "hybrid" function: It's normally unmanaged, but becomes managed
- * when pcim_enable_device() has been called in advance. This hybrid feature is
- * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
int pci_request_regions(struct pci_dev *pdev, const char *name)
{
@@ -4173,11 +4143,6 @@ EXPORT_SYMBOL(pci_request_regions);
*
* Returns 0 on success, or %EBUSY on error. A warning message is also
* printed on failure.
- *
- * NOTE:
- * This is a "hybrid" function: It's normally unmanaged, but becomes managed
- * when pcim_enable_device() has been called in advance. This hybrid feature is
- * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
int pci_request_regions_exclusive(struct pci_dev *pdev, const char *name)
{
@@ -4257,7 +4222,7 @@ unsigned long __weak pci_address_to_pio(phys_addr_t address)
#ifndef pci_remap_iospace
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
{
-#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
+#if defined(PCI_IOBASE)
unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
if (!(res->flags & IORESOURCE_IO))
@@ -4290,7 +4255,7 @@ EXPORT_SYMBOL(pci_remap_iospace);
*/
void pci_unmap_iospace(struct resource *res)
{
-#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
+#if defined(PCI_IOBASE)
unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
vunmap_range(vaddr, vaddr + resource_size(res));
@@ -4718,6 +4683,11 @@ static int pcie_wait_for_link_status(struct pci_dev *pdev,
* @pdev: Device whose link to retrain.
* @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE, for status.
*
+ * Trigger retraining of the PCIe Link and wait for the completion of the
+ * retraining. As link retraining is known to assert LBMS and may change
+ * the Link Speed, LBMS is cleared after the retraining and the Link Speed
+ * of the subordinate bus is updated.
+ *
* Retrain completion status is retrieved from the Link Status Register
* according to @use_lt. It is not verified whether the use of the DLLLA
* bit is valid.
@@ -4757,7 +4727,19 @@ int pcie_retrain_link(struct pci_dev *pdev, bool use_lt)
* to track link speed or width changes made by hardware itself
* in attempt to correct unreliable link operation.
*/
- pcie_reset_lbms_count(pdev);
+ pcie_reset_lbms(pdev);
+
+ /*
+ * Ensure the Link Speed updates after retraining in case the Link
+ * Speed was changed because of the retraining. While the bwctrl's
+ * IRQ handler normally picks up the new Link Speed, clearing LBMS
+ * races with the IRQ handler reading the Link Status register and
+ * can result in the handler returning early without updating the
+ * Link Speed.
+ */
+ if (pdev->subordinate)
+ pcie_update_link_speed(pdev->subordinate);
+
return rc;
}
@@ -4954,7 +4936,7 @@ int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type)
delay);
if (!pcie_wait_for_link_delay(dev, true, delay)) {
/* Did not train, no need to wait any further */
- pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n");
+ pci_info(dev, "Data Link Layer Link Active not set in %d msec\n", delay);
return -ENOTTY;
}
@@ -5538,7 +5520,8 @@ static void pci_slot_unlock(struct pci_slot *slot)
continue;
if (dev->subordinate)
pci_bus_unlock(dev->subordinate);
- pci_dev_unlock(dev);
+ else
+ pci_dev_unlock(dev);
}
}
@@ -6802,11 +6785,6 @@ int __weak pci_ext_cfg_avail(void)
return 1;
}
-void __weak pci_fixup_cardbus(struct pci_bus *bus)
-{
-}
-EXPORT_SYMBOL(pci_fixup_cardbus);
-
static int __init pci_setup(char *str)
{
while (str) {
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 39f368d2f26d..12215ee72afb 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -9,6 +9,8 @@ struct pcie_tlp_log;
/* Number of possible devfns: 0.0 to 1f.7 inclusive */
#define MAX_NR_DEVFNS 256
+#define MAX_NR_LANES 16
+
#define PCI_FIND_CAP_TTL 48
#define PCI_VSEC_ID_INTEL_TBT 0x1234 /* Thunderbolt */
@@ -148,6 +150,7 @@ void pci_dev_adjust_pme(struct pci_dev *dev);
void pci_dev_complete_resume(struct pci_dev *pci_dev);
void pci_config_pm_runtime_get(struct pci_dev *dev);
void pci_config_pm_runtime_put(struct pci_dev *dev);
+void pci_pm_power_up_and_verify_state(struct pci_dev *pci_dev);
void pci_pm_init(struct pci_dev *dev);
void pci_ea_init(struct pci_dev *dev);
void pci_msi_init(struct pci_dev *dev);
@@ -227,6 +230,7 @@ static inline int pci_proc_detach_bus(struct pci_bus *bus) { return 0; }
/* Functions for PCI Hotplug drivers to use */
int pci_hp_add_bridge(struct pci_dev *dev);
+bool pci_hp_spurious_link_change(struct pci_dev *pdev);
#if defined(CONFIG_SYSFS) && defined(HAVE_PCI_LEGACY)
void pci_create_legacy_files(struct pci_bus *bus);
@@ -557,6 +561,10 @@ static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused)
#define PCI_DPC_RECOVERED 1
#define PCI_DPC_RECOVERING 2
#define PCI_DEV_REMOVED 3
+#define PCI_LINK_CHANGED 4
+#define PCI_LINK_CHANGING 5
+#define PCI_LINK_LBMS_SEEN 6
+#define PCI_DEV_ALLOW_BINDING 7
static inline void pci_dev_assign_added(struct pci_dev *dev)
{
@@ -580,6 +588,16 @@ static inline bool pci_dev_test_and_set_removed(struct pci_dev *dev)
return test_and_set_bit(PCI_DEV_REMOVED, &dev->priv_flags);
}
+static inline void pci_dev_allow_binding(struct pci_dev *dev)
+{
+ set_bit(PCI_DEV_ALLOW_BINDING, &dev->priv_flags);
+}
+
+static inline bool pci_dev_binding_disallowed(struct pci_dev *dev)
+{
+ return !test_bit(PCI_DEV_ALLOW_BINDING, &dev->priv_flags);
+}
+
#ifdef CONFIG_PCIEAER
#include <linux/aer.h>
@@ -587,12 +605,15 @@ static inline bool pci_dev_test_and_set_removed(struct pci_dev *dev)
struct aer_err_info {
struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES];
+ int ratelimit_print[AER_MAX_MULTI_ERR_DEVICES];
int error_dev_num;
+ const char *level; /* printk level */
unsigned int id:16;
unsigned int severity:2; /* 0:NONFATAL | 1:FATAL | 2:COR */
- unsigned int __pad1:5;
+ unsigned int root_ratelimit_print:1; /* 0=skip, 1=print */
+ unsigned int __pad1:4;
unsigned int multi_error_valid:1;
unsigned int first_error:5;
@@ -604,15 +625,16 @@ struct aer_err_info {
struct pcie_tlp_log tlp; /* TLP Header */
};
-int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info);
-void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
+int aer_get_device_error_info(struct aer_err_info *info, int i);
+void aer_print_error(struct aer_err_info *info, int i);
int pcie_read_tlp_log(struct pci_dev *dev, int where, int where2,
unsigned int tlp_len, bool flit,
struct pcie_tlp_log *log);
unsigned int aer_tlp_log_len(struct pci_dev *dev, u32 aercc);
void pcie_print_tlp_log(const struct pci_dev *dev,
- const struct pcie_tlp_log *log, const char *pfx);
+ const struct pcie_tlp_log *log, const char *level,
+ const char *pfx);
#endif /* CONFIG_PCIEAER */
#ifdef CONFIG_PCIEPORTBUS
@@ -824,14 +846,9 @@ static inline void pcie_ecrc_get_policy(char *str) { }
#endif
#ifdef CONFIG_PCIEPORTBUS
-void pcie_reset_lbms_count(struct pci_dev *port);
-int pcie_lbms_count(struct pci_dev *port, unsigned long *val);
+void pcie_reset_lbms(struct pci_dev *port);
#else
-static inline void pcie_reset_lbms_count(struct pci_dev *port) {}
-static inline int pcie_lbms_count(struct pci_dev *port, unsigned long *val)
-{
- return -EOPNOTSUPP;
-}
+static inline void pcie_reset_lbms(struct pci_dev *port) {}
#endif
struct pci_dev_reset_methods {
@@ -876,6 +893,21 @@ static inline u64 pci_rebar_size_to_bytes(int size)
struct device_node;
+#define PCI_EQ_RESV 0xff
+
+enum equalization_preset_type {
+ EQ_PRESET_TYPE_8GTS,
+ EQ_PRESET_TYPE_16GTS,
+ EQ_PRESET_TYPE_32GTS,
+ EQ_PRESET_TYPE_64GTS,
+ EQ_PRESET_TYPE_MAX
+};
+
+struct pci_eq_presets {
+ u16 eq_presets_8gts[MAX_NR_LANES];
+ u8 eq_presets_Ngts[EQ_PRESET_TYPE_MAX - 1][MAX_NR_LANES];
+};
+
#ifdef CONFIG_OF
int of_get_pci_domain_nr(struct device_node *node);
int of_pci_get_max_link_speed(struct device_node *node);
@@ -890,7 +922,9 @@ void pci_release_bus_of_node(struct pci_bus *bus);
int devm_of_pci_bridge_init(struct device *dev, struct pci_host_bridge *bridge);
bool of_pci_supply_present(struct device_node *np);
-
+int of_pci_get_equalization_presets(struct device *dev,
+ struct pci_eq_presets *presets,
+ int num_lanes);
#else
static inline int
of_get_pci_domain_nr(struct device_node *node)
@@ -935,6 +969,17 @@ static inline bool of_pci_supply_present(struct device_node *np)
{
return false;
}
+
+static inline int of_pci_get_equalization_presets(struct device *dev,
+ struct pci_eq_presets *presets,
+ int num_lanes)
+{
+ presets->eq_presets_8gts[0] = PCI_EQ_RESV;
+ for (int i = 0; i < EQ_PRESET_TYPE_MAX - 1; i++)
+ presets->eq_presets_Ngts[i][0] = PCI_EQ_RESV;
+
+ return 0;
+}
#endif /* CONFIG_OF */
struct of_changeset;
@@ -961,6 +1006,7 @@ void pci_no_aer(void);
void pci_aer_init(struct pci_dev *dev);
void pci_aer_exit(struct pci_dev *dev);
extern const struct attribute_group aer_stats_attr_group;
+extern const struct attribute_group aer_attr_group;
void pci_aer_clear_fatal_status(struct pci_dev *dev);
int pci_aer_clear_status(struct pci_dev *dev);
int pci_aer_raw_clear_status(struct pci_dev *dev);
@@ -1059,11 +1105,6 @@ static inline pci_power_t mid_pci_get_power_state(struct pci_dev *pdev)
}
#endif
-int pcim_intx(struct pci_dev *dev, int enable);
-int pcim_request_region_exclusive(struct pci_dev *pdev, int bar,
- const char *name);
-void pcim_release_region(struct pci_dev *pdev, int bar);
-
#ifdef CONFIG_PCI_MSI
int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag);
#else
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index a1cf8c7ef628..70ac66188367 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -28,6 +28,7 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/kfifo.h>
+#include <linux/ratelimit.h>
#include <linux/slab.h>
#include <acpi/apei.h>
#include <acpi/ghes.h>
@@ -54,8 +55,8 @@ struct aer_rpc {
DECLARE_KFIFO(aer_fifo, struct aer_err_source, AER_ERROR_SOURCES_MAX);
};
-/* AER stats for the device */
-struct aer_stats {
+/* AER info for the device */
+struct aer_info {
/*
* Fields for all AER capable devices. They indicate the errors
@@ -88,6 +89,10 @@ struct aer_stats {
u64 rootport_total_cor_errs;
u64 rootport_total_fatal_errs;
u64 rootport_total_nonfatal_errs;
+
+ /* Ratelimits for errors */
+ struct ratelimit_state correctable_ratelimit;
+ struct ratelimit_state nonfatal_ratelimit;
};
#define AER_LOG_TLP_MASKS (PCI_ERR_UNC_POISON_TLP| \
@@ -377,7 +382,13 @@ void pci_aer_init(struct pci_dev *dev)
if (!dev->aer_cap)
return;
- dev->aer_stats = kzalloc(sizeof(struct aer_stats), GFP_KERNEL);
+ dev->aer_info = kzalloc(sizeof(*dev->aer_info), GFP_KERNEL);
+ if (dev->aer_info) {
+ ratelimit_state_init(&dev->aer_info->correctable_ratelimit,
+ DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
+ ratelimit_state_init(&dev->aer_info->nonfatal_ratelimit,
+ DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
+ }
/*
* We save/restore PCI_ERR_UNCOR_MASK, PCI_ERR_UNCOR_SEVER,
@@ -398,8 +408,8 @@ void pci_aer_init(struct pci_dev *dev)
void pci_aer_exit(struct pci_dev *dev)
{
- kfree(dev->aer_stats);
- dev->aer_stats = NULL;
+ kfree(dev->aer_info);
+ dev->aer_info = NULL;
}
#define AER_AGENT_RECEIVER 0
@@ -537,10 +547,10 @@ static const char *aer_agent_string[] = {
{ \
unsigned int i; \
struct pci_dev *pdev = to_pci_dev(dev); \
- u64 *stats = pdev->aer_stats->stats_array; \
+ u64 *stats = pdev->aer_info->stats_array; \
size_t len = 0; \
\
- for (i = 0; i < ARRAY_SIZE(pdev->aer_stats->stats_array); i++) {\
+ for (i = 0; i < ARRAY_SIZE(pdev->aer_info->stats_array); i++) { \
if (strings_array[i]) \
len += sysfs_emit_at(buf, len, "%s %llu\n", \
strings_array[i], \
@@ -551,7 +561,7 @@ static const char *aer_agent_string[] = {
i, stats[i]); \
} \
len += sysfs_emit_at(buf, len, "TOTAL_%s %llu\n", total_string, \
- pdev->aer_stats->total_field); \
+ pdev->aer_info->total_field); \
return len; \
} \
static DEVICE_ATTR_RO(name)
@@ -572,7 +582,7 @@ aer_stats_dev_attr(aer_dev_nonfatal, dev_nonfatal_errs,
char *buf) \
{ \
struct pci_dev *pdev = to_pci_dev(dev); \
- return sysfs_emit(buf, "%llu\n", pdev->aer_stats->field); \
+ return sysfs_emit(buf, "%llu\n", pdev->aer_info->field); \
} \
static DEVICE_ATTR_RO(name)
@@ -599,7 +609,7 @@ static umode_t aer_stats_attrs_are_visible(struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct pci_dev *pdev = to_pci_dev(dev);
- if (!pdev->aer_stats)
+ if (!pdev->aer_info)
return 0;
if ((a == &dev_attr_aer_rootport_total_err_cor.attr ||
@@ -617,31 +627,136 @@ const struct attribute_group aer_stats_attr_group = {
.is_visible = aer_stats_attrs_are_visible,
};
+/*
+ * Ratelimit interval
+ * <=0: disabled with ratelimit.interval = 0
+ * >0: enabled with ratelimit.interval in ms
+ */
+#define aer_ratelimit_interval_attr(name, ratelimit) \
+ static ssize_t \
+ name##_show(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+ { \
+ struct pci_dev *pdev = to_pci_dev(dev); \
+ \
+ return sysfs_emit(buf, "%d\n", \
+ pdev->aer_info->ratelimit.interval); \
+ } \
+ \
+ static ssize_t \
+ name##_store(struct device *dev, struct device_attribute *attr, \
+ const char *buf, size_t count) \
+ { \
+ struct pci_dev *pdev = to_pci_dev(dev); \
+ int interval; \
+ \
+ if (!capable(CAP_SYS_ADMIN)) \
+ return -EPERM; \
+ \
+ if (kstrtoint(buf, 0, &interval) < 0) \
+ return -EINVAL; \
+ \
+ if (interval <= 0) \
+ interval = 0; \
+ else \
+ interval = msecs_to_jiffies(interval); \
+ \
+ pdev->aer_info->ratelimit.interval = interval; \
+ \
+ return count; \
+ } \
+ static DEVICE_ATTR_RW(name);
+
+#define aer_ratelimit_burst_attr(name, ratelimit) \
+ static ssize_t \
+ name##_show(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+ { \
+ struct pci_dev *pdev = to_pci_dev(dev); \
+ \
+ return sysfs_emit(buf, "%d\n", \
+ pdev->aer_info->ratelimit.burst); \
+ } \
+ \
+ static ssize_t \
+ name##_store(struct device *dev, struct device_attribute *attr, \
+ const char *buf, size_t count) \
+ { \
+ struct pci_dev *pdev = to_pci_dev(dev); \
+ int burst; \
+ \
+ if (!capable(CAP_SYS_ADMIN)) \
+ return -EPERM; \
+ \
+ if (kstrtoint(buf, 0, &burst) < 0) \
+ return -EINVAL; \
+ \
+ pdev->aer_info->ratelimit.burst = burst; \
+ \
+ return count; \
+ } \
+ static DEVICE_ATTR_RW(name);
+
+#define aer_ratelimit_attrs(name) \
+ aer_ratelimit_interval_attr(name##_ratelimit_interval_ms, \
+ name##_ratelimit) \
+ aer_ratelimit_burst_attr(name##_ratelimit_burst, \
+ name##_ratelimit)
+
+aer_ratelimit_attrs(correctable)
+aer_ratelimit_attrs(nonfatal)
+
+static struct attribute *aer_attrs[] = {
+ &dev_attr_correctable_ratelimit_interval_ms.attr,
+ &dev_attr_correctable_ratelimit_burst.attr,
+ &dev_attr_nonfatal_ratelimit_interval_ms.attr,
+ &dev_attr_nonfatal_ratelimit_burst.attr,
+ NULL
+};
+
+static umode_t aer_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (!pdev->aer_info)
+ return 0;
+
+ return a->mode;
+}
+
+const struct attribute_group aer_attr_group = {
+ .name = "aer",
+ .attrs = aer_attrs,
+ .is_visible = aer_attrs_are_visible,
+};
+
static void pci_dev_aer_stats_incr(struct pci_dev *pdev,
struct aer_err_info *info)
{
unsigned long status = info->status & ~info->mask;
int i, max = -1;
u64 *counter = NULL;
- struct aer_stats *aer_stats = pdev->aer_stats;
+ struct aer_info *aer_info = pdev->aer_info;
- if (!aer_stats)
+ if (!aer_info)
return;
switch (info->severity) {
case AER_CORRECTABLE:
- aer_stats->dev_total_cor_errs++;
- counter = &aer_stats->dev_cor_errs[0];
+ aer_info->dev_total_cor_errs++;
+ counter = &aer_info->dev_cor_errs[0];
max = AER_MAX_TYPEOF_COR_ERRS;
break;
case AER_NONFATAL:
- aer_stats->dev_total_nonfatal_errs++;
- counter = &aer_stats->dev_nonfatal_errs[0];
+ aer_info->dev_total_nonfatal_errs++;
+ counter = &aer_info->dev_nonfatal_errs[0];
max = AER_MAX_TYPEOF_UNCOR_ERRS;
break;
case AER_FATAL:
- aer_stats->dev_total_fatal_errs++;
- counter = &aer_stats->dev_fatal_errs[0];
+ aer_info->dev_total_fatal_errs++;
+ counter = &aer_info->dev_fatal_errs[0];
max = AER_MAX_TYPEOF_UNCOR_ERRS;
break;
}
@@ -653,37 +768,46 @@ static void pci_dev_aer_stats_incr(struct pci_dev *pdev,
static void pci_rootport_aer_stats_incr(struct pci_dev *pdev,
struct aer_err_source *e_src)
{
- struct aer_stats *aer_stats = pdev->aer_stats;
+ struct aer_info *aer_info = pdev->aer_info;
- if (!aer_stats)
+ if (!aer_info)
return;
if (e_src->status & PCI_ERR_ROOT_COR_RCV)
- aer_stats->rootport_total_cor_errs++;
+ aer_info->rootport_total_cor_errs++;
if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
- aer_stats->rootport_total_fatal_errs++;
+ aer_info->rootport_total_fatal_errs++;
else
- aer_stats->rootport_total_nonfatal_errs++;
+ aer_info->rootport_total_nonfatal_errs++;
+ }
+}
+
+static int aer_ratelimit(struct pci_dev *dev, unsigned int severity)
+{
+ switch (severity) {
+ case AER_NONFATAL:
+ return __ratelimit(&dev->aer_info->nonfatal_ratelimit);
+ case AER_CORRECTABLE:
+ return __ratelimit(&dev->aer_info->correctable_ratelimit);
+ default:
+ return 1; /* Don't ratelimit fatal errors */
}
}
-static void __aer_print_error(struct pci_dev *dev,
- struct aer_err_info *info)
+static void __aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
{
const char **strings;
unsigned long status = info->status & ~info->mask;
- const char *level, *errmsg;
+ const char *level = info->level;
+ const char *errmsg;
int i;
- if (info->severity == AER_CORRECTABLE) {
+ if (info->severity == AER_CORRECTABLE)
strings = aer_correctable_error_string;
- level = KERN_WARNING;
- } else {
+ else
strings = aer_uncorrectable_error_string;
- level = KERN_ERR;
- }
for_each_set_bit(i, &status, 32) {
errmsg = strings[i];
@@ -693,14 +817,39 @@ static void __aer_print_error(struct pci_dev *dev,
aer_printk(level, dev, " [%2d] %-22s%s\n", i, errmsg,
info->first_error == i ? " (First)" : "");
}
- pci_dev_aer_stats_incr(dev, info);
}
-void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
+static void aer_print_source(struct pci_dev *dev, struct aer_err_info *info,
+ bool found)
+{
+ u16 source = info->id;
+
+ pci_info(dev, "%s%s error message received from %04x:%02x:%02x.%d%s\n",
+ info->multi_error_valid ? "Multiple " : "",
+ aer_error_severity_string[info->severity],
+ pci_domain_nr(dev->bus), PCI_BUS_NUM(source),
+ PCI_SLOT(source), PCI_FUNC(source),
+ found ? "" : " (no details found)");
+}
+
+void aer_print_error(struct aer_err_info *info, int i)
{
- int layer, agent;
- int id = pci_dev_id(dev);
- const char *level;
+ struct pci_dev *dev;
+ int layer, agent, id;
+ const char *level = info->level;
+
+ if (WARN_ON_ONCE(i >= AER_MAX_MULTI_ERR_DEVICES))
+ return;
+
+ dev = info->dev[i];
+ id = pci_dev_id(dev);
+
+ pci_dev_aer_stats_incr(dev, info);
+ trace_aer_event(pci_name(dev), (info->status & ~info->mask),
+ info->severity, info->tlp_header_valid, &info->tlp);
+
+ if (!info->ratelimit_print[i])
+ return;
if (!info->status) {
pci_err(dev, "PCIe Bus Error: severity=%s, type=Inaccessible, (Unregistered Agent ID)\n",
@@ -711,8 +860,6 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
layer = AER_GET_LAYER_ERROR(info->severity, info->status);
agent = AER_GET_AGENT(info->severity, info->status);
- level = (info->severity == AER_CORRECTABLE) ? KERN_WARNING : KERN_ERR;
-
aer_printk(level, dev, "PCIe Bus Error: severity=%s, type=%s, (%s)\n",
aer_error_severity_string[info->severity],
aer_error_layer[layer], aer_agent_string[agent]);
@@ -723,26 +870,11 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
__aer_print_error(dev, info);
if (info->tlp_header_valid)
- pcie_print_tlp_log(dev, &info->tlp, dev_fmt(" "));
+ pcie_print_tlp_log(dev, &info->tlp, level, dev_fmt(" "));
out:
if (info->id && info->error_dev_num > 1 && info->id == id)
pci_err(dev, " Error of this Agent is reported first\n");
-
- trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask),
- info->severity, info->tlp_header_valid, &info->tlp);
-}
-
-static void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
-{
- u8 bus = info->id >> 8;
- u8 devfn = info->id & 0xff;
-
- pci_info(dev, "%s%s error message received from %04x:%02x:%02x.%d\n",
- info->multi_error_valid ? "Multiple " : "",
- aer_error_severity_string[info->severity],
- pci_domain_nr(dev->bus), bus, PCI_SLOT(devfn),
- PCI_FUNC(devfn));
}
#ifdef CONFIG_ACPI_APEI_PCIEAER
@@ -765,40 +897,48 @@ void pci_print_aer(struct pci_dev *dev, int aer_severity,
{
int layer, agent, tlp_header_valid = 0;
u32 status, mask;
- struct aer_err_info info;
+ struct aer_err_info info = {
+ .severity = aer_severity,
+ .first_error = PCI_ERR_CAP_FEP(aer->cap_control),
+ };
if (aer_severity == AER_CORRECTABLE) {
status = aer->cor_status;
mask = aer->cor_mask;
+ info.level = KERN_WARNING;
} else {
status = aer->uncor_status;
mask = aer->uncor_mask;
+ info.level = KERN_ERR;
tlp_header_valid = status & AER_LOG_TLP_MASKS;
}
- layer = AER_GET_LAYER_ERROR(aer_severity, status);
- agent = AER_GET_AGENT(aer_severity, status);
-
- memset(&info, 0, sizeof(info));
- info.severity = aer_severity;
info.status = status;
info.mask = mask;
- info.first_error = PCI_ERR_CAP_FEP(aer->cap_control);
- pci_err(dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", status, mask);
+ pci_dev_aer_stats_incr(dev, &info);
+ trace_aer_event(pci_name(dev), (status & ~mask),
+ aer_severity, tlp_header_valid, &aer->header_log);
+
+ if (!aer_ratelimit(dev, info.severity))
+ return;
+
+ layer = AER_GET_LAYER_ERROR(aer_severity, status);
+ agent = AER_GET_AGENT(aer_severity, status);
+
+ aer_printk(info.level, dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n",
+ status, mask);
__aer_print_error(dev, &info);
- pci_err(dev, "aer_layer=%s, aer_agent=%s\n",
- aer_error_layer[layer], aer_agent_string[agent]);
+ aer_printk(info.level, dev, "aer_layer=%s, aer_agent=%s\n",
+ aer_error_layer[layer], aer_agent_string[agent]);
if (aer_severity != AER_CORRECTABLE)
- pci_err(dev, "aer_uncor_severity: 0x%08x\n",
- aer->uncor_severity);
+ aer_printk(info.level, dev, "aer_uncor_severity: 0x%08x\n",
+ aer->uncor_severity);
if (tlp_header_valid)
- pcie_print_tlp_log(dev, &aer->header_log, dev_fmt(" "));
-
- trace_aer_event(dev_name(&dev->dev), (status & ~mask),
- aer_severity, tlp_header_valid, &aer->header_log);
+ pcie_print_tlp_log(dev, &aer->header_log, info.level,
+ dev_fmt(" "));
}
EXPORT_SYMBOL_NS_GPL(pci_print_aer, "CXL");
@@ -809,12 +949,27 @@ EXPORT_SYMBOL_NS_GPL(pci_print_aer, "CXL");
*/
static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
{
- if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) {
- e_info->dev[e_info->error_dev_num] = pci_dev_get(dev);
- e_info->error_dev_num++;
- return 0;
+ int i = e_info->error_dev_num;
+
+ if (i >= AER_MAX_MULTI_ERR_DEVICES)
+ return -ENOSPC;
+
+ e_info->dev[i] = pci_dev_get(dev);
+ e_info->error_dev_num++;
+
+ /*
+ * Ratelimit AER log messages. "dev" is either the source
+ * identified by the root's Error Source ID or it has an unmasked
+ * error logged in its own AER Capability. Messages are emitted
+ * when "ratelimit_print[i]" is non-zero. If we will print detail
+ * for a downstream device, make sure we print the Error Source ID
+ * from the root as well.
+ */
+ if (aer_ratelimit(dev, e_info->severity)) {
+ e_info->ratelimit_print[i] = 1;
+ e_info->root_ratelimit_print = 1;
}
- return -ENOSPC;
+ return 0;
}
/**
@@ -908,7 +1063,7 @@ static int find_device_iter(struct pci_dev *dev, void *data)
* e_info->error_dev_num and e_info->dev[], based on the given information.
*/
static bool find_source_device(struct pci_dev *parent,
- struct aer_err_info *e_info)
+ struct aer_err_info *e_info)
{
struct pci_dev *dev = parent;
int result;
@@ -926,15 +1081,8 @@ static bool find_source_device(struct pci_dev *parent,
else
pci_walk_bus(parent->subordinate, find_device_iter, e_info);
- if (!e_info->error_dev_num) {
- u8 bus = e_info->id >> 8;
- u8 devfn = e_info->id & 0xff;
-
- pci_info(parent, "found no error details for %04x:%02x:%02x.%d\n",
- pci_domain_nr(parent->bus), bus, PCI_SLOT(devfn),
- PCI_FUNC(devfn));
+ if (!e_info->error_dev_num)
return false;
- }
return true;
}
@@ -1141,9 +1289,10 @@ static void aer_recover_work_func(struct work_struct *work)
pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus,
entry.devfn);
if (!pdev) {
- pr_err("no pci_dev for %04x:%02x:%02x.%x\n",
- entry.domain, entry.bus,
- PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
+ pr_err_ratelimited("%04x:%02x:%02x.%x: no pci_dev found\n",
+ entry.domain, entry.bus,
+ PCI_SLOT(entry.devfn),
+ PCI_FUNC(entry.devfn));
continue;
}
pci_print_aer(pdev, entry.severity, entry.regs);
@@ -1199,19 +1348,26 @@ EXPORT_SYMBOL_GPL(aer_recover_queue);
/**
* aer_get_device_error_info - read error status from dev and store it to info
- * @dev: pointer to the device expected to have an error record
* @info: pointer to structure to store the error record
+ * @i: index into info->dev[]
*
* Return: 1 on success, 0 on error.
*
* Note that @info is reused among all error devices. Clear fields properly.
*/
-int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
+int aer_get_device_error_info(struct aer_err_info *info, int i)
{
- int type = pci_pcie_type(dev);
- int aer = dev->aer_cap;
+ struct pci_dev *dev;
+ int type, aer;
u32 aercc;
+ if (i >= AER_MAX_MULTI_ERR_DEVICES)
+ return 0;
+
+ dev = info->dev[i];
+ aer = dev->aer_cap;
+ type = pci_pcie_type(dev);
+
/* Must reset in this function */
info->status = 0;
info->tlp_header_valid = 0;
@@ -1263,63 +1419,87 @@ static inline void aer_process_err_devices(struct aer_err_info *e_info)
/* Report all before handling them, to not lose records by reset etc. */
for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
- if (aer_get_device_error_info(e_info->dev[i], e_info))
- aer_print_error(e_info->dev[i], e_info);
+ if (aer_get_device_error_info(e_info, i))
+ aer_print_error(e_info, i);
}
for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
- if (aer_get_device_error_info(e_info->dev[i], e_info))
+ if (aer_get_device_error_info(e_info, i))
handle_error_source(e_info->dev[i], e_info);
}
}
/**
- * aer_isr_one_error - consume an error detected by Root Port
- * @rpc: pointer to the Root Port which holds an error
- * @e_src: pointer to an error source
+ * aer_isr_one_error_type - consume a Correctable or Uncorrectable Error
+ * detected by Root Port or RCEC
+ * @root: pointer to Root Port or RCEC that signaled AER interrupt
+ * @info: pointer to AER error info
*/
-static void aer_isr_one_error(struct aer_rpc *rpc,
- struct aer_err_source *e_src)
+static void aer_isr_one_error_type(struct pci_dev *root,
+ struct aer_err_info *info)
{
- struct pci_dev *pdev = rpc->rpd;
- struct aer_err_info e_info;
+ bool found;
- pci_rootport_aer_stats_incr(pdev, e_src);
+ found = find_source_device(root, info);
/*
- * There is a possibility that both correctable error and
- * uncorrectable error being logged. Report correctable error first.
+ * If we're going to log error messages, we've already set
+ * "info->root_ratelimit_print" and "info->ratelimit_print[i]" to
+ * non-zero (which enables printing) because this is either an
+ * ERR_FATAL or we found a device with an error logged in its AER
+ * Capability.
+ *
+ * If we didn't find the Error Source device, at least log the
+ * Requester ID from the ERR_* Message received by the Root Port or
+ * RCEC, ratelimited by the RP or RCEC.
*/
- if (e_src->status & PCI_ERR_ROOT_COR_RCV) {
- e_info.id = ERR_COR_ID(e_src->id);
- e_info.severity = AER_CORRECTABLE;
-
- if (e_src->status & PCI_ERR_ROOT_MULTI_COR_RCV)
- e_info.multi_error_valid = 1;
- else
- e_info.multi_error_valid = 0;
- aer_print_port_info(pdev, &e_info);
+ if (info->root_ratelimit_print ||
+ (!found && aer_ratelimit(root, info->severity)))
+ aer_print_source(root, info, found);
- if (find_source_device(pdev, &e_info))
- aer_process_err_devices(&e_info);
- }
-
- if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
- e_info.id = ERR_UNCOR_ID(e_src->id);
+ if (found)
+ aer_process_err_devices(info);
+}
- if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
- e_info.severity = AER_FATAL;
- else
- e_info.severity = AER_NONFATAL;
+/**
+ * aer_isr_one_error - consume error(s) signaled by an AER interrupt from
+ * Root Port or RCEC
+ * @root: pointer to Root Port or RCEC that signaled AER interrupt
+ * @e_src: pointer to an error source
+ */
+static void aer_isr_one_error(struct pci_dev *root,
+ struct aer_err_source *e_src)
+{
+ u32 status = e_src->status;
- if (e_src->status & PCI_ERR_ROOT_MULTI_UNCOR_RCV)
- e_info.multi_error_valid = 1;
- else
- e_info.multi_error_valid = 0;
+ pci_rootport_aer_stats_incr(root, e_src);
- aer_print_port_info(pdev, &e_info);
+ /*
+ * Both a correctable error and an uncorrectable error may be logged
+ * at the same time. Report the correctable error first.
+ */
+ if (status & PCI_ERR_ROOT_COR_RCV) {
+ int multi = status & PCI_ERR_ROOT_MULTI_COR_RCV;
+ struct aer_err_info e_info = {
+ .id = ERR_COR_ID(e_src->id),
+ .severity = AER_CORRECTABLE,
+ .level = KERN_WARNING,
+ .multi_error_valid = multi ? 1 : 0,
+ };
+
+ aer_isr_one_error_type(root, &e_info);
+ }
- if (find_source_device(pdev, &e_info))
- aer_process_err_devices(&e_info);
+ if (status & PCI_ERR_ROOT_UNCOR_RCV) {
+ int fatal = status & PCI_ERR_ROOT_FATAL_RCV;
+ int multi = status & PCI_ERR_ROOT_MULTI_UNCOR_RCV;
+ struct aer_err_info e_info = {
+ .id = ERR_UNCOR_ID(e_src->id),
+ .severity = fatal ? AER_FATAL : AER_NONFATAL,
+ .level = KERN_ERR,
+ .multi_error_valid = multi ? 1 : 0,
+ };
+
+ aer_isr_one_error_type(root, &e_info);
}
}
@@ -1340,7 +1520,7 @@ static irqreturn_t aer_isr(int irq, void *context)
return IRQ_NONE;
while (kfifo_get(&rpc->aer_fifo, &e_src))
- aer_isr_one_error(rpc, &e_src);
+ aer_isr_one_error(rpc->rpd, &e_src);
return IRQ_HANDLED;
}
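/*
 * A usage note, assuming the standard PCI sysfs layout: since
 * aer_attr_group is named "aer", the new knobs surface per device as
 *
 *	/sys/bus/pci/devices/<BDF>/aer/correctable_ratelimit_interval_ms
 *	/sys/bus/pci/devices/<BDF>/aer/correctable_ratelimit_burst
 *	/sys/bus/pci/devices/<BDF>/aer/nonfatal_ratelimit_interval_ms
 *	/sys/bus/pci/devices/<BDF>/aer/nonfatal_ratelimit_burst
 *
 * Writing an interval <= 0 disables ratelimiting for that severity,
 * and fatal errors are never ratelimited (see aer_ratelimit() above).
 */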
diff --git a/drivers/pci/pcie/bwctrl.c b/drivers/pci/pcie/bwctrl.c
index d8d2aa85a229..36f939f23d34 100644
--- a/drivers/pci/pcie/bwctrl.c
+++ b/drivers/pci/pcie/bwctrl.c
@@ -38,24 +38,14 @@
/**
* struct pcie_bwctrl_data - PCIe bandwidth controller
* @set_speed_mutex: Serializes link speed changes
- * @lbms_count: Count for LBMS (since last reset)
* @cdev: Thermal cooling device associated with the port
*/
struct pcie_bwctrl_data {
struct mutex set_speed_mutex;
- atomic_t lbms_count;
struct thermal_cooling_device *cdev;
};
-/*
- * Prevent port removal during LBMS count accessors and Link Speed changes.
- *
- * These have to be differentiated because pcie_bwctrl_change_speed() calls
- * pcie_retrain_link() which uses LBMS count reset accessor on success
- * (using just one rwsem triggers "possible recursive locking detected"
- * warning).
- */
-static DECLARE_RWSEM(pcie_bwctrl_lbms_rwsem);
+/* Prevent port removal during Link Speed changes. */
static DECLARE_RWSEM(pcie_bwctrl_setspeed_rwsem);
static bool pcie_valid_speed(enum pci_bus_speed speed)
@@ -127,18 +117,7 @@ static int pcie_bwctrl_change_speed(struct pci_dev *port, u16 target_speed, bool
if (ret != PCIBIOS_SUCCESSFUL)
return pcibios_err_to_errno(ret);
- ret = pcie_retrain_link(port, use_lt);
- if (ret < 0)
- return ret;
-
- /*
- * Ensure link speed updates also with platforms that have problems
- * with notifications.
- */
- if (port->subordinate)
- pcie_update_link_speed(port->subordinate);
-
- return 0;
+ return pcie_retrain_link(port, use_lt);
}
/**
@@ -202,15 +181,14 @@ int pcie_set_target_speed(struct pci_dev *port, enum pci_bus_speed speed_req,
static void pcie_bwnotif_enable(struct pcie_device *srv)
{
- struct pcie_bwctrl_data *data = srv->port->link_bwctrl;
struct pci_dev *port = srv->port;
u16 link_status;
int ret;
- /* Count LBMS seen so far as one */
+ /* Note if LBMS has been seen so far */
ret = pcie_capability_read_word(port, PCI_EXP_LNKSTA, &link_status);
if (ret == PCIBIOS_SUCCESSFUL && link_status & PCI_EXP_LNKSTA_LBMS)
- atomic_inc(&data->lbms_count);
+ set_bit(PCI_LINK_LBMS_SEEN, &port->priv_flags);
pcie_capability_set_word(port, PCI_EXP_LNKCTL,
PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE);
@@ -233,7 +211,6 @@ static void pcie_bwnotif_disable(struct pci_dev *port)
static irqreturn_t pcie_bwnotif_irq(int irq, void *context)
{
struct pcie_device *srv = context;
- struct pcie_bwctrl_data *data = srv->port->link_bwctrl;
struct pci_dev *port = srv->port;
u16 link_status, events;
int ret;
@@ -247,7 +224,7 @@ static irqreturn_t pcie_bwnotif_irq(int irq, void *context)
return IRQ_NONE;
if (events & PCI_EXP_LNKSTA_LBMS)
- atomic_inc(&data->lbms_count);
+ set_bit(PCI_LINK_LBMS_SEEN, &port->priv_flags);
pcie_capability_write_word(port, PCI_EXP_LNKSTA, events);
@@ -262,31 +239,10 @@ static irqreturn_t pcie_bwnotif_irq(int irq, void *context)
return IRQ_HANDLED;
}
-void pcie_reset_lbms_count(struct pci_dev *port)
+void pcie_reset_lbms(struct pci_dev *port)
{
- struct pcie_bwctrl_data *data;
-
- guard(rwsem_read)(&pcie_bwctrl_lbms_rwsem);
- data = port->link_bwctrl;
- if (data)
- atomic_set(&data->lbms_count, 0);
- else
- pcie_capability_write_word(port, PCI_EXP_LNKSTA,
- PCI_EXP_LNKSTA_LBMS);
-}
-
-int pcie_lbms_count(struct pci_dev *port, unsigned long *val)
-{
- struct pcie_bwctrl_data *data;
-
- guard(rwsem_read)(&pcie_bwctrl_lbms_rwsem);
- data = port->link_bwctrl;
- if (!data)
- return -ENOTTY;
-
- *val = atomic_read(&data->lbms_count);
-
- return 0;
+ clear_bit(PCI_LINK_LBMS_SEEN, &port->priv_flags);
+ pcie_capability_write_word(port, PCI_EXP_LNKSTA, PCI_EXP_LNKSTA_LBMS);
}
static int pcie_bwnotif_probe(struct pcie_device *srv)
@@ -308,18 +264,16 @@ static int pcie_bwnotif_probe(struct pcie_device *srv)
return ret;
scoped_guard(rwsem_write, &pcie_bwctrl_setspeed_rwsem) {
- scoped_guard(rwsem_write, &pcie_bwctrl_lbms_rwsem) {
- port->link_bwctrl = data;
-
- ret = request_irq(srv->irq, pcie_bwnotif_irq,
- IRQF_SHARED, "PCIe bwctrl", srv);
- if (ret) {
- port->link_bwctrl = NULL;
- return ret;
- }
+ port->link_bwctrl = data;
- pcie_bwnotif_enable(srv);
+ ret = request_irq(srv->irq, pcie_bwnotif_irq,
+ IRQF_SHARED, "PCIe bwctrl", srv);
+ if (ret) {
+ port->link_bwctrl = NULL;
+ return ret;
}
+
+ pcie_bwnotif_enable(srv);
}
pci_dbg(port, "enabled with IRQ %d\n", srv->irq);
@@ -339,13 +293,11 @@ static void pcie_bwnotif_remove(struct pcie_device *srv)
pcie_cooling_device_unregister(data->cdev);
scoped_guard(rwsem_write, &pcie_bwctrl_setspeed_rwsem) {
- scoped_guard(rwsem_write, &pcie_bwctrl_lbms_rwsem) {
- pcie_bwnotif_disable(srv->port);
+ pcie_bwnotif_disable(srv->port);
- free_irq(srv->irq, srv);
+ free_irq(srv->irq, srv);
- srv->port->link_bwctrl = NULL;
- }
+ srv->port->link_bwctrl = NULL;
}
}
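/*
 * A note on the simplification above: PCI_LINK_LBMS_SEEN lives in
 * pdev->priv_flags and is manipulated with the atomic set_bit()/
 * clear_bit()/test_bit() helpers, so it neither requires
 * port->link_bwctrl to be allocated nor a lock of its own. That is
 * what allows removing the second rwsem, which existed only to avoid
 * the "possible recursive locking detected" warning on the
 * pcie_retrain_link() path.
 */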
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index df42f15c9829..fc18349614d7 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -222,7 +222,7 @@ static void dpc_process_rp_pio_error(struct pci_dev *pdev)
dpc_tlp_log_len(pdev),
pdev->subordinate->flit_mode,
&tlp_log);
- pcie_print_tlp_log(pdev, &tlp_log, dev_fmt(""));
+ pcie_print_tlp_log(pdev, &tlp_log, KERN_ERR, dev_fmt(""));
if (pdev->dpc_rp_log_size < PCIE_STD_NUM_TLP_HEADERLOG + 1)
goto clear_status;
@@ -252,46 +252,59 @@ static int dpc_get_aer_uncorrect_severity(struct pci_dev *dev,
else
info->severity = AER_NONFATAL;
+ info->level = KERN_ERR;
+
+ info->dev[0] = dev;
+ info->error_dev_num = 1;
+
return 1;
}
void dpc_process_error(struct pci_dev *pdev)
{
u16 cap = pdev->dpc_cap, status, source, reason, ext_reason;
- struct aer_err_info info;
+ struct aer_err_info info = {};
pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
- pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID, &source);
-
- pci_info(pdev, "containment event, status:%#06x source:%#06x\n",
- status, source);
reason = status & PCI_EXP_DPC_STATUS_TRIGGER_RSN;
- ext_reason = status & PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT;
- pci_warn(pdev, "%s detected\n",
- (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_UNCOR) ?
- "unmasked uncorrectable error" :
- (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_NFE) ?
- "ERR_NONFATAL" :
- (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_FE) ?
- "ERR_FATAL" :
- (ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO) ?
- "RP PIO error" :
- (ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_SW_TRIGGER) ?
- "software trigger" :
- "reserved error");
-
- /* show RP PIO error detail information */
- if (pdev->dpc_rp_extensions &&
- reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_IN_EXT &&
- ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO)
- dpc_process_rp_pio_error(pdev);
- else if (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_UNCOR &&
- dpc_get_aer_uncorrect_severity(pdev, &info) &&
- aer_get_device_error_info(pdev, &info)) {
- aer_print_error(pdev, &info);
- pci_aer_clear_nonfatal_status(pdev);
- pci_aer_clear_fatal_status(pdev);
+
+ switch (reason) {
+ case PCI_EXP_DPC_STATUS_TRIGGER_RSN_UNCOR:
+ pci_warn(pdev, "containment event, status:%#06x: unmasked uncorrectable error detected\n",
+ status);
+ if (dpc_get_aer_uncorrect_severity(pdev, &info) &&
+ aer_get_device_error_info(&info, 0)) {
+ aer_print_error(&info, 0);
+ pci_aer_clear_nonfatal_status(pdev);
+ pci_aer_clear_fatal_status(pdev);
+ }
+ break;
+ case PCI_EXP_DPC_STATUS_TRIGGER_RSN_NFE:
+ case PCI_EXP_DPC_STATUS_TRIGGER_RSN_FE:
+ pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID,
+ &source);
+ pci_warn(pdev, "containment event, status:%#06x, %s received from %04x:%02x:%02x.%d\n",
+ status,
+ (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_FE) ?
+ "ERR_FATAL" : "ERR_NONFATAL",
+ pci_domain_nr(pdev->bus), PCI_BUS_NUM(source),
+ PCI_SLOT(source), PCI_FUNC(source));
+ break;
+ case PCI_EXP_DPC_STATUS_TRIGGER_RSN_IN_EXT:
+ ext_reason = status & PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT;
+ pci_warn(pdev, "containment event, status:%#06x: %s detected\n",
+ status,
+ (ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO) ?
+ "RP PIO error" :
+ (ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_SW_TRIGGER) ?
+ "software trigger" :
+ "reserved error");
+ /* show RP PIO error detail information */
+ if (ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO &&
+ pdev->dpc_rp_extensions)
+ dpc_process_rp_pio_error(pdev);
+ break;
}
}
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index 31090770fffc..de6381c690f5 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -271,7 +271,6 @@ failed:
pci_uevent_ers(bridge, PCI_ERS_RESULT_DISCONNECT);
- /* TODO: Should kernel panic here? */
pci_info(bridge, "device recovery failed\n");
return status;
diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c
index 7cfb6c0d5dcb..ee5f615a9023 100644
--- a/drivers/pci/pcie/ptm.c
+++ b/drivers/pci/pcie/ptm.c
@@ -5,6 +5,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
@@ -252,3 +253,305 @@ bool pcie_ptm_enabled(struct pci_dev *dev)
return dev->ptm_enabled;
}
EXPORT_SYMBOL(pcie_ptm_enabled);
+
+static ssize_t context_update_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = file->private_data;
+ char buf[8]; /* room for "manual\n" plus the NUL terminator */
+ int ret;
+ u8 mode;
+
+ if (!ptm_debugfs->ops->context_update_write)
+ return -EOPNOTSUPP;
+
+ if (count < 1 || count >= sizeof(buf))
+ return -EINVAL;
+
+ ret = copy_from_user(buf, ubuf, count);
+ if (ret)
+ return -EFAULT;
+
+ buf[count] = '\0';
+
+ if (sysfs_streq(buf, "auto"))
+ mode = PCIE_PTM_CONTEXT_UPDATE_AUTO;
+ else if (sysfs_streq(buf, "manual"))
+ mode = PCIE_PTM_CONTEXT_UPDATE_MANUAL;
+ else
+ return -EINVAL;
+
+ mutex_lock(&ptm_debugfs->lock);
+ ret = ptm_debugfs->ops->context_update_write(ptm_debugfs->pdata, mode);
+ mutex_unlock(&ptm_debugfs->lock);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t context_update_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = file->private_data;
+ char buf[8]; /* Extra space for NULL termination at the end */
+ ssize_t pos;
+ u8 mode;
+
+ if (!ptm_debugfs->ops->context_update_read)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&ptm_debugfs->lock);
+ ptm_debugfs->ops->context_update_read(ptm_debugfs->pdata, &mode);
+ mutex_unlock(&ptm_debugfs->lock);
+
+ if (mode == PCIE_PTM_CONTEXT_UPDATE_AUTO)
+ pos = scnprintf(buf, sizeof(buf), "auto\n");
+ else
+ pos = scnprintf(buf, sizeof(buf), "manual\n");
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, pos);
+}
+
+static const struct file_operations context_update_fops = {
+ .open = simple_open,
+ .read = context_update_read,
+ .write = context_update_write,
+};
+
+static int context_valid_get(void *data, u64 *val)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = data;
+ bool valid;
+ int ret;
+
+ if (!ptm_debugfs->ops->context_valid_read)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&ptm_debugfs->lock);
+ ret = ptm_debugfs->ops->context_valid_read(ptm_debugfs->pdata, &valid);
+ mutex_unlock(&ptm_debugfs->lock);
+ if (ret)
+ return ret;
+
+ *val = valid;
+
+ return 0;
+}
+
+static int context_valid_set(void *data, u64 val)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = data;
+ int ret;
+
+ if (!ptm_debugfs->ops->context_valid_write)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&ptm_debugfs->lock);
+ ret = ptm_debugfs->ops->context_valid_write(ptm_debugfs->pdata, !!val);
+ mutex_unlock(&ptm_debugfs->lock);
+
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(context_valid_fops, context_valid_get,
+ context_valid_set, "%llu\n");
+
+static int local_clock_get(void *data, u64 *val)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = data;
+ u64 clock;
+ int ret;
+
+ if (!ptm_debugfs->ops->local_clock_read)
+ return -EOPNOTSUPP;
+
+ ret = ptm_debugfs->ops->local_clock_read(ptm_debugfs->pdata, &clock);
+ if (ret)
+ return ret;
+
+ *val = clock;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(local_clock_fops, local_clock_get, NULL, "%llu\n");
+
+static int master_clock_get(void *data, u64 *val)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = data;
+ u64 clock;
+ int ret;
+
+ if (!ptm_debugfs->ops->master_clock_read)
+ return -EOPNOTSUPP;
+
+ ret = ptm_debugfs->ops->master_clock_read(ptm_debugfs->pdata, &clock);
+ if (ret)
+ return ret;
+
+ *val = clock;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(master_clock_fops, master_clock_get, NULL, "%llu\n");
+
+static int t1_get(void *data, u64 *val)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = data;
+ u64 clock;
+ int ret;
+
+ if (!ptm_debugfs->ops->t1_read)
+ return -EOPNOTSUPP;
+
+ ret = ptm_debugfs->ops->t1_read(ptm_debugfs->pdata, &clock);
+ if (ret)
+ return ret;
+
+ *val = clock;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(t1_fops, t1_get, NULL, "%llu\n");
+
+static int t2_get(void *data, u64 *val)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = data;
+ u64 clock;
+ int ret;
+
+ if (!ptm_debugfs->ops->t2_read)
+ return -EOPNOTSUPP;
+
+ ret = ptm_debugfs->ops->t2_read(ptm_debugfs->pdata, &clock);
+ if (ret)
+ return ret;
+
+ *val = clock;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(t2_fops, t2_get, NULL, "%llu\n");
+
+static int t3_get(void *data, u64 *val)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = data;
+ u64 clock;
+ int ret;
+
+ if (!ptm_debugfs->ops->t3_read)
+ return -EOPNOTSUPP;
+
+ ret = ptm_debugfs->ops->t3_read(ptm_debugfs->pdata, &clock);
+ if (ret)
+ return ret;
+
+ *val = clock;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(t3_fops, t3_get, NULL, "%llu\n");
+
+static int t4_get(void *data, u64 *val)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = data;
+ u64 clock;
+ int ret;
+
+ if (!ptm_debugfs->ops->t4_read)
+ return -EOPNOTSUPP;
+
+ ret = ptm_debugfs->ops->t4_read(ptm_debugfs->pdata, &clock);
+ if (ret)
+ return ret;
+
+ *val = clock;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(t4_fops, t4_get, NULL, "%llu\n");
+
+#define pcie_ptm_create_debugfs_file(pdata, mode, attr) \
+ do { \
+ if (ops->attr##_visible && ops->attr##_visible(pdata)) \
+ debugfs_create_file(#attr, mode, ptm_debugfs->debugfs, \
+ ptm_debugfs, &attr##_fops); \
+ } while (0)
+
+/**
+ * pcie_ptm_create_debugfs() - Create debugfs entries for the PTM context
+ * @dev: PTM capable component device
+ * @pdata: Private data of the PTM capable component device
+ * @ops: PTM callback structure
+ *
+ * Create debugfs entries for exposing the PTM context of the PTM capable
+ * components such as Root Complex and Endpoint controllers.
+ *
+ * Return: Pointer to 'struct pci_ptm_debugfs' on success, NULL otherwise.
+ */
+struct pci_ptm_debugfs *pcie_ptm_create_debugfs(struct device *dev, void *pdata,
+ const struct pcie_ptm_ops *ops)
+{
+ struct pci_ptm_debugfs *ptm_debugfs;
+ char *dirname;
+ int ret;
+
+ /* Caller must provide check_capability() callback */
+ if (!ops->check_capability)
+ return NULL;
+
+ /* Check for PTM capability before creating debugfs attributes */
+ ret = ops->check_capability(pdata);
+ if (!ret) {
+ dev_dbg(dev, "PTM capability not present\n");
+ return NULL;
+ }
+
+ ptm_debugfs = kzalloc(sizeof(*ptm_debugfs), GFP_KERNEL);
+ if (!ptm_debugfs)
+ return NULL;
+
+ dirname = devm_kasprintf(dev, GFP_KERNEL, "pcie_ptm_%s", dev_name(dev));
+ if (!dirname) {
+ kfree(ptm_debugfs);
+ return NULL;
+ }
+
+ ptm_debugfs->debugfs = debugfs_create_dir(dirname, NULL);
+ ptm_debugfs->pdata = pdata;
+ ptm_debugfs->ops = ops;
+ mutex_init(&ptm_debugfs->lock);
+
+ pcie_ptm_create_debugfs_file(pdata, 0644, context_update);
+ pcie_ptm_create_debugfs_file(pdata, 0644, context_valid);
+ pcie_ptm_create_debugfs_file(pdata, 0444, local_clock);
+ pcie_ptm_create_debugfs_file(pdata, 0444, master_clock);
+ pcie_ptm_create_debugfs_file(pdata, 0444, t1);
+ pcie_ptm_create_debugfs_file(pdata, 0444, t2);
+ pcie_ptm_create_debugfs_file(pdata, 0444, t3);
+ pcie_ptm_create_debugfs_file(pdata, 0444, t4);
+
+ return ptm_debugfs;
+}
+EXPORT_SYMBOL_GPL(pcie_ptm_create_debugfs);
+
+/**
+ * pcie_ptm_destroy_debugfs() - Destroy debugfs entries for the PTM context
+ * @ptm_debugfs: Pointer to the PTM debugfs struct
+ */
+void pcie_ptm_destroy_debugfs(struct pci_ptm_debugfs *ptm_debugfs)
+{
+ if (!ptm_debugfs)
+ return;
+
+ mutex_destroy(&ptm_debugfs->lock);
+ debugfs_remove_recursive(ptm_debugfs->debugfs);
+ kfree(ptm_debugfs);
+}
+EXPORT_SYMBOL_GPL(pcie_ptm_destroy_debugfs);
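/*
 * A minimal registration sketch, with callback signatures inferred
 * from the call sites above; the example_* names and the bool return
 * type of the _visible() hook are assumptions, not part of this patch.
 */
static int example_check_capability(void *pdata)
{
	return 1;	/* non-zero: PTM context registers are reachable */
}

static int example_local_clock_read(void *pdata, u64 *clock)
{
	*clock = 0;	/* read the controller's local PTM clock here */
	return 0;
}

static bool example_local_clock_visible(void *pdata)
{
	return true;	/* create the "local_clock" debugfs file */
}

static const struct pcie_ptm_ops example_ptm_ops = {
	.check_capability	= example_check_capability,
	.local_clock_read	= example_local_clock_read,
	.local_clock_visible	= example_local_clock_visible,
};

/*
 * At probe time:
 *	priv->ptm_debugfs = pcie_ptm_create_debugfs(dev, priv,
 *						    &example_ptm_ops);
 * and on teardown:
 *	pcie_ptm_destroy_debugfs(priv->ptm_debugfs);
 */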
diff --git a/drivers/pci/pcie/tlp.c b/drivers/pci/pcie/tlp.c
index 890d5391d7f5..71f8fc9ea2ed 100644
--- a/drivers/pci/pcie/tlp.c
+++ b/drivers/pci/pcie/tlp.c
@@ -98,12 +98,14 @@ int pcie_read_tlp_log(struct pci_dev *dev, int where, int where2,
* pcie_print_tlp_log - Print TLP Header / Prefix Log contents
* @dev: PCIe device
* @log: TLP Log structure
+ * @level: Printk log level
* @pfx: String prefix
*
* Prints TLP Header and Prefix Log information held by @log.
*/
void pcie_print_tlp_log(const struct pci_dev *dev,
- const struct pcie_tlp_log *log, const char *pfx)
+ const struct pcie_tlp_log *log, const char *level,
+ const char *pfx)
{
/* EE_PREFIX_STR fits the extended DW space needed for the Flit mode */
char buf[11 * PCIE_STD_MAX_TLP_HEADERLOG + 1];
@@ -130,6 +132,6 @@ void pcie_print_tlp_log(const struct pci_dev *dev,
}
}
- pci_err(dev, "%sTLP Header%s: %s\n", pfx,
+ dev_printk(level, &dev->dev, "%sTLP Header%s: %s\n", pfx,
log->flit ? " (Flit)" : "", buf);
}
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 364fa2a514f8..4b8693ec9e4c 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -2058,7 +2058,7 @@ int pci_setup_device(struct pci_dev *dev)
if (class == PCI_CLASS_BRIDGE_PCI)
goto bad;
pci_read_irq(dev);
- pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
+ pci_read_bases(dev, PCI_STD_NUM_BARS, PCI_ROM_ADDRESS);
pci_subsystem_ids(dev, &dev->subsystem_vendor, &dev->subsystem_device);
@@ -2711,7 +2711,6 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
pci_set_msi_domain(dev);
/* Notifier could use PCI capabilities */
- dev->match_driver = false;
ret = device_add(&dev->dev);
WARN_ON(ret < 0);
diff --git a/drivers/pci/pwrctrl/Kconfig b/drivers/pci/pwrctrl/Kconfig
index 990cab67d413..6956c1854811 100644
--- a/drivers/pci/pwrctrl/Kconfig
+++ b/drivers/pci/pwrctrl/Kconfig
@@ -1,19 +1,19 @@
# SPDX-License-Identifier: GPL-2.0-only
-config HAVE_PWRCTL
+config HAVE_PWRCTRL
bool
-config PCI_PWRCTL
+config PCI_PWRCTRL
tristate
-config PCI_PWRCTL_PWRSEQ
+config PCI_PWRCTRL_PWRSEQ
tristate
select POWER_SEQUENCING
- select PCI_PWRCTL
+ select PCI_PWRCTRL
-config PCI_PWRCTL_SLOT
+config PCI_PWRCTRL_SLOT
tristate "PCI Power Control driver for PCI slots"
- select PCI_PWRCTL
+ select PCI_PWRCTRL
help
Say Y here to enable the PCI Power Control driver to control the power
state of PCI slots.
@@ -21,3 +21,13 @@ config PCI_PWRCTL_SLOT
This is a generic driver that controls the power state of different
PCI slots. The voltage regulators powering the rails of the PCI slots
are expected to be defined in the devicetree node of the PCI bridge.
+
+# deprecated
+config HAVE_PWRCTL
+ bool
+ select HAVE_PWRCTRL
+
+# deprecated
+config PCI_PWRCTL_PWRSEQ
+ tristate
+ select PCI_PWRCTRL_PWRSEQ
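With these shims in place, a Kconfig fragment that still does "select HAVE_PWRCTL" or "select PCI_PWRCTL_PWRSEQ" transitively selects the renamed HAVE_PWRCTRL / PCI_PWRCTRL_PWRSEQ symbols, so not-yet-converted users keep building while the rename propagates through the tree.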
diff --git a/drivers/pci/pwrctrl/Makefile b/drivers/pci/pwrctrl/Makefile
index ddfb12c5aadf..a4e5808d7850 100644
--- a/drivers/pci/pwrctrl/Makefile
+++ b/drivers/pci/pwrctrl/Makefile
@@ -1,9 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_PCI_PWRCTL) += pci-pwrctrl-core.o
+obj-$(CONFIG_PCI_PWRCTRL) += pci-pwrctrl-core.o
pci-pwrctrl-core-y := core.o
-obj-$(CONFIG_PCI_PWRCTL_PWRSEQ) += pci-pwrctrl-pwrseq.o
+obj-$(CONFIG_PCI_PWRCTRL_PWRSEQ) += pci-pwrctrl-pwrseq.o
-obj-$(CONFIG_PCI_PWRCTL_SLOT) += pci-pwrctl-slot.o
-pci-pwrctl-slot-y := slot.o
+obj-$(CONFIG_PCI_PWRCTRL_SLOT) += pci-pwrctrl-slot.o
+pci-pwrctrl-slot-y := slot.o
diff --git a/drivers/pci/pwrctrl/core.c b/drivers/pci/pwrctrl/core.c
index 9cc7e2b7f2b5..6bdbfed584d6 100644
--- a/drivers/pci/pwrctrl/core.c
+++ b/drivers/pci/pwrctrl/core.c
@@ -101,6 +101,8 @@ EXPORT_SYMBOL_GPL(pci_pwrctrl_device_set_ready);
*/
void pci_pwrctrl_device_unset_ready(struct pci_pwrctrl *pwrctrl)
{
+ cancel_work_sync(&pwrctrl->work);
+
/*
* We don't have to delete the link here. Typically, this function
* is only called when the power control device is being detached. If
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 94daca15a096..d7f4ee634263 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -38,14 +38,10 @@
static bool pcie_lbms_seen(struct pci_dev *dev, u16 lnksta)
{
- unsigned long count;
- int ret;
-
- ret = pcie_lbms_count(dev, &count);
- if (ret < 0)
- return lnksta & PCI_EXP_LNKSTA_LBMS;
+ if (test_bit(PCI_LINK_LBMS_SEEN, &dev->priv_flags))
+ return true;
- return count > 0;
+ return lnksta & PCI_EXP_LNKSTA_LBMS;
}
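This rework assumes another code path latches LBMS into dev->priv_flags when it observes the bit; a minimal sketch of that producing side (foo_note_lbms is a hypothetical name, shown only to illustrate the flag protocol):

/* Hypothetical observer: latch LBMS so pcie_lbms_seen() can test it later */
static void foo_note_lbms(struct pci_dev *dev, u16 lnksta)
{
	if (lnksta & PCI_EXP_LNKSTA_LBMS)
		set_bit(PCI_LINK_LBMS_SEEN, &dev->priv_flags);
}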
/*
@@ -4995,6 +4991,18 @@ static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
+static int pci_quirk_loongson_acs(struct pci_dev *dev, u16 acs_flags)
+{
+ /*
+ * Loongson PCIe Root Ports don't advertise an ACS capability, but
+ * they do not allow peer-to-peer transactions between Root Ports.
+ * Allow each Root Port to be in a separate IOMMU group by masking
+ * SV/RR/CR/UF bits.
+ */
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+}
+
/*
* Wangxun 40G/25G/10G/1G NICs have no ACS capability, but on
* multi-function devices, the hardware isolates the functions by
@@ -5128,6 +5136,17 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_BROADCOM, 0x1762, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_BROADCOM, 0x1763, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
+ /* Loongson PCIe Root Ports */
+ { PCI_VENDOR_ID_LOONGSON, 0x3C09, pci_quirk_loongson_acs },
+ { PCI_VENDOR_ID_LOONGSON, 0x3C19, pci_quirk_loongson_acs },
+ { PCI_VENDOR_ID_LOONGSON, 0x3C29, pci_quirk_loongson_acs },
+ { PCI_VENDOR_ID_LOONGSON, 0x7A09, pci_quirk_loongson_acs },
+ { PCI_VENDOR_ID_LOONGSON, 0x7A19, pci_quirk_loongson_acs },
+ { PCI_VENDOR_ID_LOONGSON, 0x7A29, pci_quirk_loongson_acs },
+ { PCI_VENDOR_ID_LOONGSON, 0x7A39, pci_quirk_loongson_acs },
+ { PCI_VENDOR_ID_LOONGSON, 0x7A49, pci_quirk_loongson_acs },
+ { PCI_VENDOR_ID_LOONGSON, 0x7A59, pci_quirk_loongson_acs },
+ { PCI_VENDOR_ID_LOONGSON, 0x7A69, pci_quirk_loongson_acs },
/* Amazon Annapurna Labs */
{ PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
/* Zhaoxin multi-function devices */
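For reference, pci_acs_ctrl_enabled(), used by the quirk above and defined earlier in quirks.c, reports whether every requested ACS control should be treated as enabled; it is roughly:

static int pci_acs_ctrl_enabled(u16 acs_ctrl_req, u16 acs_ctrl_ena)
{
	/* All requested controls must be covered by what the quirk enables */
	if ((acs_ctrl_req & acs_ctrl_ena) == acs_ctrl_req)
		return 1;
	return 0;
}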
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index e994c546422c..07c3d021a47e 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -776,8 +776,7 @@ static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
{
struct pci_dev *bridge = bus->self;
- pci_info(bridge, "PCI bridge to %pR\n",
- &bus->busn_res);
+ pci_info(bridge, "PCI bridge to %pR\n", &bus->busn_res);
if (type & IORESOURCE_IO)
pci_setup_bridge_io(bridge);
@@ -2302,8 +2301,8 @@ void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
/* Depth last, allocate resources and update the hardware. */
__pci_bus_assign_resources(bus, add_list, &fail_head);
- if (add_list)
- BUG_ON(!list_empty(add_list));
+ if (WARN_ON_ONCE(add_list && !list_empty(add_list)))
+ free_list(add_list);
tried_times++;
 /* Did any device complain? */
@@ -2365,7 +2364,8 @@ void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
pci_bridge_distribute_available_resources(bridge, &add_list);
__pci_bridge_assign_resources(bridge, &add_list, &fail_head);
- BUG_ON(!list_empty(&add_list));
+ if (WARN_ON_ONCE(!list_empty(&add_list)))
+ free_list(&add_list);
tried_times++;
if (list_empty(&fail_head))
@@ -2441,7 +2441,8 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
__pci_bus_size_bridges(bridge->subordinate, &added);
__pci_bridge_assign_resources(bridge, &added, &failed);
- BUG_ON(!list_empty(&added));
+ if (WARN_ON_ONCE(!list_empty(&added)))
+ free_list(&added);
if (!list_empty(&failed)) {
ret = -ENOSPC;
@@ -2497,6 +2498,7 @@ void pci_assign_unassigned_bus_resources(struct pci_bus *bus)
__pci_bus_size_bridges(dev->subordinate, &add_list);
up_read(&pci_bus_sem);
__pci_bus_assign_resources(bus, &add_list, NULL);
- BUG_ON(!list_empty(&add_list));
+ if (WARN_ON_ONCE(!list_empty(&add_list)))
+ free_list(&add_list);
}
EXPORT_SYMBOL_GPL(pci_assign_unassigned_bus_resources);
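The common thread in these hunks: a non-empty add_list at this point indicates a driver bug rather than a reason to crash, so each hard BUG_ON() becomes a one-shot warning, and free_list() (an existing helper in setup-bus.c that releases the queued pci_dev_resource entries) is called so the warning path does not leak the list.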
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index 45c8252c8edc..5e5cf2c3e2c8 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -72,7 +72,6 @@ int __ref cb_alloc(struct pcmcia_socket *s)
pci_lock_rescan_remove();
s->functions = pci_scan_slot(bus, PCI_DEVFN(0, 0));
- pci_fixup_cardbus(bus);
max = bus->busn_res.start;
for (pass = 0; pass < 2; pass++)
diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
index 5832dce8ed9d..4789eafb8bac 100644
--- a/drivers/pwm/pwm-stm32-lp.c
+++ b/drivers/pwm/pwm-stm32-lp.c
@@ -20,6 +20,7 @@
struct stm32_pwm_lp {
struct clk *clk;
struct regmap *regmap;
+ unsigned int num_cc_chans;
};
static inline struct stm32_pwm_lp *to_stm32_pwm_lp(struct pwm_chip *chip)
@@ -30,13 +31,101 @@ static inline struct stm32_pwm_lp *to_stm32_pwm_lp(struct pwm_chip *chip)
/* STM32 Low-Power Timer is preceded by a configurable power-of-2 prescaler */
#define STM32_LPTIM_MAX_PRESCALER 128
+static int stm32_pwm_lp_update_allowed(struct stm32_pwm_lp *priv, int channel)
+{
+ int ret;
+ u32 ccmr1;
+ unsigned long ccmr;
+
+ /* Only one PWM on this LPTIMER: enable, prescaler and reload value can be changed */
+ if (!priv->num_cc_chans)
+ return true;
+
+ ret = regmap_read(priv->regmap, STM32_LPTIM_CCMR1, &ccmr1);
+ if (ret)
+ return ret;
+ ccmr = ccmr1 & (STM32_LPTIM_CC1E | STM32_LPTIM_CC2E);
+
+ /* More than one channel enabled: enable, prescaler or ARR value can't be changed */
+ if (bitmap_weight(&ccmr, sizeof(u32) * BITS_PER_BYTE) > 1)
+ return false;
+
+ /*
+ * Only one channel is enabled (or none): check status on the other channel, to
+ * report if enable, prescaler or ARR value can be changed.
+ */
+ if (channel)
+ return !(ccmr1 & STM32_LPTIM_CC1E);
+ else
+ return !(ccmr1 & STM32_LPTIM_CC2E);
+}
+
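Worked through: with both CC1E and CC2E set, bitmap_weight() sees two bits and the function returns false; with only CC1E set, a request from channel 0 (the CC1 owner) finds CC2E clear and returns true, while a request from channel 1 finds CC1E set and returns false. In other words, only the sole active channel may touch the shared enable, prescaler and reload settings.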
+static int stm32_pwm_lp_compare_channel_apply(struct stm32_pwm_lp *priv, int channel,
+ bool enable, enum pwm_polarity polarity)
+{
+ u32 ccmr1, val, mask;
+ bool reenable;
+ int ret;
+
+ /* No dedicated CC channel: nothing to do */
+ if (!priv->num_cc_chans)
+ return 0;
+
+ ret = regmap_read(priv->regmap, STM32_LPTIM_CCMR1, &ccmr1);
+ if (ret)
+ return ret;
+
+ if (channel) {
+ /* Must disable CC channel (CCxE) to modify polarity (CCxP), then re-enable */
+ reenable = (enable && FIELD_GET(STM32_LPTIM_CC2E, ccmr1)) &&
+ (polarity != FIELD_GET(STM32_LPTIM_CC2P, ccmr1));
+
+ mask = STM32_LPTIM_CC2SEL | STM32_LPTIM_CC2E | STM32_LPTIM_CC2P;
+ val = FIELD_PREP(STM32_LPTIM_CC2P, polarity);
+ val |= FIELD_PREP(STM32_LPTIM_CC2E, enable);
+ } else {
+ reenable = (enable && FIELD_GET(STM32_LPTIM_CC1E, ccmr1)) &&
+ (polarity != FIELD_GET(STM32_LPTIM_CC1P, ccmr1));
+
+ mask = STM32_LPTIM_CC1SEL | STM32_LPTIM_CC1E | STM32_LPTIM_CC1P;
+ val = FIELD_PREP(STM32_LPTIM_CC1P, polarity);
+ val |= FIELD_PREP(STM32_LPTIM_CC1E, enable);
+ }
+
+ if (reenable) {
+ u32 cfgr, presc;
+ unsigned long rate;
+ unsigned int delay_us;
+
+ ret = regmap_update_bits(priv->regmap, STM32_LPTIM_CCMR1,
+ channel ? STM32_LPTIM_CC2E : STM32_LPTIM_CC1E, 0);
+ if (ret)
+ return ret;
+ /*
+ * After a write to the LPTIM_CCMRx register, a new write operation can only be
+ * performed after a delay of at least (PRESC × 3) clock cycles
+ */
+ ret = regmap_read(priv->regmap, STM32_LPTIM_CFGR, &cfgr);
+ if (ret)
+ return ret;
+ presc = FIELD_GET(STM32_LPTIM_PRESC, cfgr);
+ rate = clk_get_rate(priv->clk) >> presc;
+ if (!rate)
+ return -EINVAL;
+ delay_us = 3 * DIV_ROUND_UP(USEC_PER_SEC, rate);
+ usleep_range(delay_us, delay_us * 2);
+ }
+
+ return regmap_update_bits(priv->regmap, STM32_LPTIM_CCMR1, mask, val);
+}
+
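To make the delay bound concrete (a worked example, assuming the LPTIM is fed from the 32.768 kHz LSE clock): with PRESC = 0 the effective rate is 32768 Hz, DIV_ROUND_UP(USEC_PER_SEC, rate) = 31, so delay_us = 93 and usleep_range(93, 186) comfortably covers the required three clock cycles (about 91.6 µs).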
static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
struct stm32_pwm_lp *priv = to_stm32_pwm_lp(chip);
unsigned long long prd, div, dty;
struct pwm_state cstate;
- u32 val, mask, cfgr, presc = 0;
+ u32 arr, val, mask, cfgr, presc = 0;
bool reenable;
int ret;
@@ -45,10 +134,28 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (!state->enabled) {
if (cstate.enabled) {
- /* Disable LP timer */
- ret = regmap_write(priv->regmap, STM32_LPTIM_CR, 0);
+ /* Disable CC channel if any */
+ ret = stm32_pwm_lp_compare_channel_apply(priv, pwm->hwpwm, false,
+ state->polarity);
if (ret)
return ret;
+ ret = regmap_write(priv->regmap, pwm->hwpwm ?
+ STM32_LPTIM_CCR2 : STM32_LPTIM_CMP, 0);
+ if (ret)
+ return ret;
+
+ /* Check if the timer can be disabled */
+ ret = stm32_pwm_lp_update_allowed(priv, pwm->hwpwm);
+ if (ret < 0)
+ return ret;
+
+ if (ret) {
+ /* Disable LP timer */
+ ret = regmap_write(priv->regmap, STM32_LPTIM_CR, 0);
+ if (ret)
+ return ret;
+ }
+
/* disable clock to PWM counter */
clk_disable(priv->clk);
}
@@ -79,6 +186,23 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm,
dty = prd * state->duty_cycle;
do_div(dty, state->period);
+ ret = regmap_read(priv->regmap, STM32_LPTIM_CFGR, &cfgr);
+ if (ret)
+ return ret;
+
+ /*
+ * When there are several channels, they share the same prescaler and reload value.
+	 * Check whether these can be changed, or whether the requested values already match
+	 * for all channels.
+ */
+ if (!stm32_pwm_lp_update_allowed(priv, pwm->hwpwm)) {
+ ret = regmap_read(priv->regmap, STM32_LPTIM_ARR, &arr);
+ if (ret)
+ return ret;
+
+ if ((FIELD_GET(STM32_LPTIM_PRESC, cfgr) != presc) || (arr != prd - 1))
+ return -EBUSY;
+ }
+
if (!cstate.enabled) {
/* enable clock to drive PWM counter */
ret = clk_enable(priv->clk);
@@ -86,15 +210,20 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return ret;
}
- ret = regmap_read(priv->regmap, STM32_LPTIM_CFGR, &cfgr);
- if (ret)
- goto err;
-
if ((FIELD_GET(STM32_LPTIM_PRESC, cfgr) != presc) ||
- (FIELD_GET(STM32_LPTIM_WAVPOL, cfgr) != state->polarity)) {
+ ((FIELD_GET(STM32_LPTIM_WAVPOL, cfgr) != state->polarity) && !priv->num_cc_chans)) {
val = FIELD_PREP(STM32_LPTIM_PRESC, presc);
- val |= FIELD_PREP(STM32_LPTIM_WAVPOL, state->polarity);
- mask = STM32_LPTIM_PRESC | STM32_LPTIM_WAVPOL;
+ mask = STM32_LPTIM_PRESC;
+
+ if (!priv->num_cc_chans) {
+ /*
+			 * WAVPOL bit is only available when no capture compare channel is used,
+ * e.g. on LPTIMER instances that have only one output channel. CCMR1 is
+ * used otherwise.
+ */
+ val |= FIELD_PREP(STM32_LPTIM_WAVPOL, state->polarity);
+ mask |= STM32_LPTIM_WAVPOL;
+ }
/* Must disable LP timer to modify CFGR */
reenable = true;
@@ -120,20 +249,27 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (ret)
goto err;
- ret = regmap_write(priv->regmap, STM32_LPTIM_CMP, prd - (1 + dty));
+ /* Write CMP/CCRx register and ensure it's been properly written */
+ ret = regmap_write(priv->regmap, pwm->hwpwm ? STM32_LPTIM_CCR2 : STM32_LPTIM_CMP,
+ prd - (1 + dty));
if (ret)
goto err;
- /* ensure CMP & ARR registers are properly written */
- ret = regmap_read_poll_timeout(priv->regmap, STM32_LPTIM_ISR, val,
+ /* ensure ARR and CMP/CCRx registers are properly written */
+ ret = regmap_read_poll_timeout(priv->regmap, STM32_LPTIM_ISR, val, pwm->hwpwm ?
+ (val & STM32_LPTIM_CMP2_ARROK) == STM32_LPTIM_CMP2_ARROK :
(val & STM32_LPTIM_CMPOK_ARROK) == STM32_LPTIM_CMPOK_ARROK,
100, 1000);
if (ret) {
dev_err(pwmchip_parent(chip), "ARR/CMP registers write issue\n");
goto err;
}
- ret = regmap_write(priv->regmap, STM32_LPTIM_ICR,
- STM32_LPTIM_CMPOKCF_ARROKCF);
+ ret = regmap_write(priv->regmap, STM32_LPTIM_ICR, pwm->hwpwm ?
+ STM32_LPTIM_CMP2OKCF_ARROKCF : STM32_LPTIM_CMPOKCF_ARROKCF);
+ if (ret)
+ goto err;
+
+ ret = stm32_pwm_lp_compare_channel_apply(priv, pwm->hwpwm, true, state->polarity);
if (ret)
goto err;
@@ -161,11 +297,22 @@ static int stm32_pwm_lp_get_state(struct pwm_chip *chip,
{
struct stm32_pwm_lp *priv = to_stm32_pwm_lp(chip);
unsigned long rate = clk_get_rate(priv->clk);
- u32 val, presc, prd;
+ u32 val, presc, prd, ccmr1;
+ bool enabled;
u64 tmp;
regmap_read(priv->regmap, STM32_LPTIM_CR, &val);
- state->enabled = !!FIELD_GET(STM32_LPTIM_ENABLE, val);
+ enabled = !!FIELD_GET(STM32_LPTIM_ENABLE, val);
+ if (priv->num_cc_chans) {
+		/* There's a CC channel, so also check whether it's enabled */
+ regmap_read(priv->regmap, STM32_LPTIM_CCMR1, &ccmr1);
+ if (pwm->hwpwm)
+ enabled &= !!FIELD_GET(STM32_LPTIM_CC2E, ccmr1);
+ else
+ enabled &= !!FIELD_GET(STM32_LPTIM_CC1E, ccmr1);
+ }
+ state->enabled = enabled;
+
/* Keep PWM counter clock refcount in sync with PWM initial state */
if (state->enabled) {
int ret = clk_enable(priv->clk);
@@ -176,14 +323,21 @@ static int stm32_pwm_lp_get_state(struct pwm_chip *chip,
regmap_read(priv->regmap, STM32_LPTIM_CFGR, &val);
presc = FIELD_GET(STM32_LPTIM_PRESC, val);
- state->polarity = FIELD_GET(STM32_LPTIM_WAVPOL, val);
+ if (priv->num_cc_chans) {
+ if (pwm->hwpwm)
+ state->polarity = FIELD_GET(STM32_LPTIM_CC2P, ccmr1);
+ else
+ state->polarity = FIELD_GET(STM32_LPTIM_CC1P, ccmr1);
+ } else {
+ state->polarity = FIELD_GET(STM32_LPTIM_WAVPOL, val);
+ }
regmap_read(priv->regmap, STM32_LPTIM_ARR, &prd);
tmp = prd + 1;
tmp = (tmp << presc) * NSEC_PER_SEC;
state->period = DIV_ROUND_CLOSEST_ULL(tmp, rate);
- regmap_read(priv->regmap, STM32_LPTIM_CMP, &val);
+ regmap_read(priv->regmap, pwm->hwpwm ? STM32_LPTIM_CCR2 : STM32_LPTIM_CMP, &val);
tmp = prd - val;
tmp = (tmp << presc) * NSEC_PER_SEC;
state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, rate);
@@ -201,15 +355,25 @@ static int stm32_pwm_lp_probe(struct platform_device *pdev)
struct stm32_lptimer *ddata = dev_get_drvdata(pdev->dev.parent);
struct stm32_pwm_lp *priv;
struct pwm_chip *chip;
+ unsigned int npwm;
int ret;
- chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*priv));
+ if (!ddata->num_cc_chans) {
+ /* No dedicated CC channel, so there's only one PWM channel */
+ npwm = 1;
+ } else {
+ /* There are dedicated CC channels, each with one PWM output */
+ npwm = ddata->num_cc_chans;
+ }
+
+ chip = devm_pwmchip_alloc(&pdev->dev, npwm, sizeof(*priv));
if (IS_ERR(chip))
return PTR_ERR(chip);
priv = to_stm32_pwm_lp(chip);
priv->regmap = ddata->regmap;
priv->clk = ddata->clk;
+ priv->num_cc_chans = ddata->num_cc_chans;
chip->ops = &stm32_pwm_lp_ops;
ret = devm_pwmchip_add(&pdev->dev, chip);
@@ -225,12 +389,15 @@ static int stm32_pwm_lp_suspend(struct device *dev)
{
struct pwm_chip *chip = dev_get_drvdata(dev);
struct pwm_state state;
-
- pwm_get_state(&chip->pwms[0], &state);
- if (state.enabled) {
- dev_err(dev, "The consumer didn't stop us (%s)\n",
- chip->pwms[0].label);
- return -EBUSY;
+ unsigned int i;
+
+ for (i = 0; i < chip->npwm; i++) {
+ pwm_get_state(&chip->pwms[i], &state);
+ if (state.enabled) {
+ dev_err(dev, "The consumer didn't stop us (%s)\n",
+ chip->pwms[i].label);
+ return -EBUSY;
+ }
}
return pinctrl_pm_select_sleep_state(dev);
diff --git a/drivers/regulator/bcm590xx-regulator.c b/drivers/regulator/bcm590xx-regulator.c
index 9f0cda46b015..50414f4cb109 100644
--- a/drivers/regulator/bcm590xx-regulator.c
+++ b/drivers/regulator/bcm590xx-regulator.c
@@ -18,112 +18,236 @@
#include <linux/regulator/of_regulator.h>
#include <linux/slab.h>
-/* I2C slave 0 registers */
-#define BCM590XX_RFLDOPMCTRL1 0x60
-#define BCM590XX_IOSR1PMCTRL1 0x7a
-#define BCM590XX_IOSR2PMCTRL1 0x7c
-#define BCM590XX_CSRPMCTRL1 0x7e
-#define BCM590XX_SDSR1PMCTRL1 0x82
-#define BCM590XX_SDSR2PMCTRL1 0x86
-#define BCM590XX_MSRPMCTRL1 0x8a
-#define BCM590XX_VSRPMCTRL1 0x8e
-#define BCM590XX_RFLDOCTRL 0x96
-#define BCM590XX_CSRVOUT1 0xc0
-
-/* I2C slave 1 registers */
-#define BCM590XX_GPLDO5PMCTRL1 0x16
-#define BCM590XX_GPLDO6PMCTRL1 0x18
-#define BCM590XX_GPLDO1CTRL 0x1a
-#define BCM590XX_GPLDO2CTRL 0x1b
-#define BCM590XX_GPLDO3CTRL 0x1c
-#define BCM590XX_GPLDO4CTRL 0x1d
-#define BCM590XX_GPLDO5CTRL 0x1e
-#define BCM590XX_GPLDO6CTRL 0x1f
-#define BCM590XX_OTG_CTRL 0x40
-#define BCM590XX_GPLDO1PMCTRL1 0x57
-#define BCM590XX_GPLDO2PMCTRL1 0x59
-#define BCM590XX_GPLDO3PMCTRL1 0x5b
-#define BCM590XX_GPLDO4PMCTRL1 0x5d
-
#define BCM590XX_REG_ENABLE BIT(7)
#define BCM590XX_VBUS_ENABLE BIT(2)
#define BCM590XX_LDO_VSEL_MASK GENMASK(5, 3)
#define BCM590XX_SR_VSEL_MASK GENMASK(5, 0)
+enum bcm590xx_reg_type {
+ BCM590XX_REG_TYPE_LDO,
+ BCM590XX_REG_TYPE_GPLDO,
+ BCM590XX_REG_TYPE_SR,
+ BCM590XX_REG_TYPE_VBUS
+};
+
+struct bcm590xx_reg_data {
+ enum bcm590xx_reg_type type;
+ enum bcm590xx_regmap_type regmap;
+ const struct regulator_desc desc;
+};
+
+struct bcm590xx_reg {
+ struct bcm590xx *mfd;
+ unsigned int n_regulators;
+ const struct bcm590xx_reg_data *regs;
+};
+
+static const struct regulator_ops bcm590xx_ops_ldo = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_iterate,
+};
+
+/*
+ * LDO ops without voltage selection, used for MICLDO on BCM59054.
+ * (These are currently the same as VBUS ops, but will be different
+ * in the future once full PMMODE support is implemented.)
+ */
+static const struct regulator_ops bcm590xx_ops_ldo_novolt = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+};
+
+static const struct regulator_ops bcm590xx_ops_dcdc = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+};
+
+static const struct regulator_ops bcm590xx_ops_vbus = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+};
+
+#define BCM590XX_REG_DESC(_model, _name, _name_lower) \
+ .id = _model##_REG_##_name, \
+ .name = #_name_lower, \
+ .of_match = of_match_ptr(#_name_lower), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE \
+
+#define BCM590XX_LDO_DESC(_model, _model_lower, _name, _name_lower, _table) \
+ BCM590XX_REG_DESC(_model, _name, _name_lower), \
+ .ops = &bcm590xx_ops_ldo, \
+ .n_voltages = ARRAY_SIZE(_model_lower##_##_table), \
+ .volt_table = _model_lower##_##_table, \
+ .vsel_reg = _model##_##_name##CTRL, \
+ .vsel_mask = BCM590XX_LDO_VSEL_MASK, \
+ .enable_reg = _model##_##_name##PMCTRL1, \
+ .enable_mask = BCM590XX_REG_ENABLE, \
+ .enable_is_inverted = true
+
+#define BCM590XX_SR_DESC(_model, _model_lower, _name, _name_lower, _ranges) \
+ BCM590XX_REG_DESC(_model, _name, _name_lower), \
+ .ops = &bcm590xx_ops_dcdc, \
+ .n_voltages = 64, \
+ .linear_ranges = _model_lower##_##_ranges, \
+ .n_linear_ranges = ARRAY_SIZE(_model_lower##_##_ranges), \
+ .vsel_reg = _model##_##_name##VOUT1, \
+ .vsel_mask = BCM590XX_SR_VSEL_MASK, \
+ .enable_reg = _model##_##_name##PMCTRL1, \
+ .enable_mask = BCM590XX_REG_ENABLE, \
+ .enable_is_inverted = true
+
+#define BCM59056_REG_DESC(_name, _name_lower) \
+ BCM590XX_REG_DESC(BCM59056, _name, _name_lower)
+#define BCM59056_LDO_DESC(_name, _name_lower, _table) \
+ BCM590XX_LDO_DESC(BCM59056, bcm59056, _name, _name_lower, _table)
+#define BCM59056_SR_DESC(_name, _name_lower, _ranges) \
+ BCM590XX_SR_DESC(BCM59056, bcm59056, _name, _name_lower, _ranges)
+
+#define BCM59054_REG_DESC(_name, _name_lower) \
+ BCM590XX_REG_DESC(BCM59054, _name, _name_lower)
+#define BCM59054_LDO_DESC(_name, _name_lower, _table) \
+ BCM590XX_LDO_DESC(BCM59054, bcm59054, _name, _name_lower, _table)
+#define BCM59054_SR_DESC(_name, _name_lower, _ranges) \
+ BCM590XX_SR_DESC(BCM59054, bcm59054, _name, _name_lower, _ranges)
+
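To see what the helper macros above produce, here is BCM59056_LDO_DESC(RFLDO, rfldo, ldo_a_table) hand-expanded inside a regulator_desc initializer (for illustration, following the macro definitions above):

.id = BCM59056_REG_RFLDO,
.name = "rfldo",
.of_match = of_match_ptr("rfldo"),
.regulators_node = of_match_ptr("regulators"),
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
.ops = &bcm590xx_ops_ldo,
.n_voltages = ARRAY_SIZE(bcm59056_ldo_a_table),
.volt_table = bcm59056_ldo_a_table,
.vsel_reg = BCM59056_RFLDOCTRL,
.vsel_mask = BCM590XX_LDO_VSEL_MASK,
.enable_reg = BCM59056_RFLDOPMCTRL1,
.enable_mask = BCM590XX_REG_ENABLE,
.enable_is_inverted = true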
+/* BCM59056 data */
+
+/* I2C slave 0 registers */
+#define BCM59056_RFLDOPMCTRL1 0x60
+#define BCM59056_CAMLDO1PMCTRL1 0x62
+#define BCM59056_CAMLDO2PMCTRL1 0x64
+#define BCM59056_SIMLDO1PMCTRL1 0x66
+#define BCM59056_SIMLDO2PMCTRL1 0x68
+#define BCM59056_SDLDOPMCTRL1 0x6a
+#define BCM59056_SDXLDOPMCTRL1 0x6c
+#define BCM59056_MMCLDO1PMCTRL1 0x6e
+#define BCM59056_MMCLDO2PMCTRL1 0x70
+#define BCM59056_AUDLDOPMCTRL1 0x72
+#define BCM59056_MICLDOPMCTRL1 0x74
+#define BCM59056_USBLDOPMCTRL1 0x76
+#define BCM59056_VIBLDOPMCTRL1 0x78
+#define BCM59056_IOSR1PMCTRL1 0x7a
+#define BCM59056_IOSR2PMCTRL1 0x7c
+#define BCM59056_CSRPMCTRL1 0x7e
+#define BCM59056_SDSR1PMCTRL1 0x82
+#define BCM59056_SDSR2PMCTRL1 0x86
+#define BCM59056_MSRPMCTRL1 0x8a
+#define BCM59056_VSRPMCTRL1 0x8e
+#define BCM59056_RFLDOCTRL 0x96
+#define BCM59056_CAMLDO1CTRL 0x97
+#define BCM59056_CAMLDO2CTRL 0x98
+#define BCM59056_SIMLDO1CTRL 0x99
+#define BCM59056_SIMLDO2CTRL 0x9a
+#define BCM59056_SDLDOCTRL 0x9b
+#define BCM59056_SDXLDOCTRL 0x9c
+#define BCM59056_MMCLDO1CTRL 0x9d
+#define BCM59056_MMCLDO2CTRL 0x9e
+#define BCM59056_AUDLDOCTRL 0x9f
+#define BCM59056_MICLDOCTRL 0xa0
+#define BCM59056_USBLDOCTRL 0xa1
+#define BCM59056_VIBLDOCTRL 0xa2
+#define BCM59056_CSRVOUT1 0xc0
+#define BCM59056_IOSR1VOUT1 0xc3
+#define BCM59056_IOSR2VOUT1 0xc6
+#define BCM59056_MSRVOUT1 0xc9
+#define BCM59056_SDSR1VOUT1 0xcc
+#define BCM59056_SDSR2VOUT1 0xcf
+#define BCM59056_VSRVOUT1 0xd2
+
+/* I2C slave 1 registers */
+#define BCM59056_GPLDO5PMCTRL1 0x16
+#define BCM59056_GPLDO6PMCTRL1 0x18
+#define BCM59056_GPLDO1CTRL 0x1a
+#define BCM59056_GPLDO2CTRL 0x1b
+#define BCM59056_GPLDO3CTRL 0x1c
+#define BCM59056_GPLDO4CTRL 0x1d
+#define BCM59056_GPLDO5CTRL 0x1e
+#define BCM59056_GPLDO6CTRL 0x1f
+#define BCM59056_OTG_CTRL 0x40
+#define BCM59056_GPLDO1PMCTRL1 0x57
+#define BCM59056_GPLDO2PMCTRL1 0x59
+#define BCM59056_GPLDO3PMCTRL1 0x5b
+#define BCM59056_GPLDO4PMCTRL1 0x5d
+
/*
* RFLDO to VSR regulators are
* accessed via I2C slave 0
*/
/* LDO regulator IDs */
-#define BCM590XX_REG_RFLDO 0
-#define BCM590XX_REG_CAMLDO1 1
-#define BCM590XX_REG_CAMLDO2 2
-#define BCM590XX_REG_SIMLDO1 3
-#define BCM590XX_REG_SIMLDO2 4
-#define BCM590XX_REG_SDLDO 5
-#define BCM590XX_REG_SDXLDO 6
-#define BCM590XX_REG_MMCLDO1 7
-#define BCM590XX_REG_MMCLDO2 8
-#define BCM590XX_REG_AUDLDO 9
-#define BCM590XX_REG_MICLDO 10
-#define BCM590XX_REG_USBLDO 11
-#define BCM590XX_REG_VIBLDO 12
+#define BCM59056_REG_RFLDO 0
+#define BCM59056_REG_CAMLDO1 1
+#define BCM59056_REG_CAMLDO2 2
+#define BCM59056_REG_SIMLDO1 3
+#define BCM59056_REG_SIMLDO2 4
+#define BCM59056_REG_SDLDO 5
+#define BCM59056_REG_SDXLDO 6
+#define BCM59056_REG_MMCLDO1 7
+#define BCM59056_REG_MMCLDO2 8
+#define BCM59056_REG_AUDLDO 9
+#define BCM59056_REG_MICLDO 10
+#define BCM59056_REG_USBLDO 11
+#define BCM59056_REG_VIBLDO 12
/* DCDC regulator IDs */
-#define BCM590XX_REG_CSR 13
-#define BCM590XX_REG_IOSR1 14
-#define BCM590XX_REG_IOSR2 15
-#define BCM590XX_REG_MSR 16
-#define BCM590XX_REG_SDSR1 17
-#define BCM590XX_REG_SDSR2 18
-#define BCM590XX_REG_VSR 19
+#define BCM59056_REG_CSR 13
+#define BCM59056_REG_IOSR1 14
+#define BCM59056_REG_IOSR2 15
+#define BCM59056_REG_MSR 16
+#define BCM59056_REG_SDSR1 17
+#define BCM59056_REG_SDSR2 18
+#define BCM59056_REG_VSR 19
/*
* GPLDO1 to VBUS regulators are
* accessed via I2C slave 1
*/
-#define BCM590XX_REG_GPLDO1 20
-#define BCM590XX_REG_GPLDO2 21
-#define BCM590XX_REG_GPLDO3 22
-#define BCM590XX_REG_GPLDO4 23
-#define BCM590XX_REG_GPLDO5 24
-#define BCM590XX_REG_GPLDO6 25
-#define BCM590XX_REG_VBUS 26
+#define BCM59056_REG_GPLDO1 20
+#define BCM59056_REG_GPLDO2 21
+#define BCM59056_REG_GPLDO3 22
+#define BCM59056_REG_GPLDO4 23
+#define BCM59056_REG_GPLDO5 24
+#define BCM59056_REG_GPLDO6 25
+#define BCM59056_REG_VBUS 26
-#define BCM590XX_NUM_REGS 27
-
-#define BCM590XX_REG_IS_LDO(n) (n < BCM590XX_REG_CSR)
-#define BCM590XX_REG_IS_GPLDO(n) \
- ((n > BCM590XX_REG_VSR) && (n < BCM590XX_REG_VBUS))
-#define BCM590XX_REG_IS_VBUS(n) (n == BCM590XX_REG_VBUS)
+#define BCM59056_NUM_REGS 27
/* LDO group A: supported voltages in microvolts */
-static const unsigned int ldo_a_table[] = {
+static const unsigned int bcm59056_ldo_a_table[] = {
1200000, 1800000, 2500000, 2700000, 2800000,
2900000, 3000000, 3300000,
};
/* LDO group C: supported voltages in microvolts */
-static const unsigned int ldo_c_table[] = {
+static const unsigned int bcm59056_ldo_c_table[] = {
3100000, 1800000, 2500000, 2700000, 2800000,
2900000, 3000000, 3300000,
};
-static const unsigned int ldo_vbus[] = {
- 5000000,
-};
-
/* DCDC group CSR: supported voltages in microvolts */
-static const struct linear_range dcdc_csr_ranges[] = {
+static const struct linear_range bcm59056_dcdc_csr_ranges[] = {
REGULATOR_LINEAR_RANGE(860000, 2, 50, 10000),
REGULATOR_LINEAR_RANGE(1360000, 51, 55, 20000),
REGULATOR_LINEAR_RANGE(900000, 56, 63, 0),
};
/* DCDC group IOSR1: supported voltages in microvolts */
-static const struct linear_range dcdc_iosr1_ranges[] = {
+static const struct linear_range bcm59056_dcdc_iosr1_ranges[] = {
REGULATOR_LINEAR_RANGE(860000, 2, 51, 10000),
REGULATOR_LINEAR_RANGE(1500000, 52, 52, 0),
REGULATOR_LINEAR_RANGE(1800000, 53, 53, 0),
@@ -131,155 +255,854 @@ static const struct linear_range dcdc_iosr1_ranges[] = {
};
/* DCDC group SDSR1: supported voltages in microvolts */
-static const struct linear_range dcdc_sdsr1_ranges[] = {
+static const struct linear_range bcm59056_dcdc_sdsr1_ranges[] = {
REGULATOR_LINEAR_RANGE(860000, 2, 50, 10000),
REGULATOR_LINEAR_RANGE(1340000, 51, 51, 0),
REGULATOR_LINEAR_RANGE(900000, 52, 63, 0),
};
-struct bcm590xx_info {
- const char *name;
- const char *vin_name;
- u8 n_voltages;
- const unsigned int *volt_table;
- u8 n_linear_ranges;
- const struct linear_range *linear_ranges;
-};
+static const struct bcm590xx_reg_data bcm59056_regs[BCM59056_NUM_REGS] = {
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(RFLDO, rfldo, ldo_a_table),
+ },
+ },
-#define BCM590XX_REG_TABLE(_name, _table) \
- { \
- .name = #_name, \
- .n_voltages = ARRAY_SIZE(_table), \
- .volt_table = _table, \
- }
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(CAMLDO1, camldo1, ldo_c_table),
+ },
+ },
-#define BCM590XX_REG_RANGES(_name, _ranges) \
- { \
- .name = #_name, \
- .n_voltages = 64, \
- .n_linear_ranges = ARRAY_SIZE(_ranges), \
- .linear_ranges = _ranges, \
- }
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(CAMLDO2, camldo2, ldo_c_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(SIMLDO1, simldo1, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(SIMLDO2, simldo2, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(SDLDO, sdldo, ldo_c_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(SDXLDO, sdxldo, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(MMCLDO1, mmcldo1, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(MMCLDO2, mmcldo2, ldo_a_table),
+ },
+ },
-static struct bcm590xx_info bcm590xx_regs[] = {
- BCM590XX_REG_TABLE(rfldo, ldo_a_table),
- BCM590XX_REG_TABLE(camldo1, ldo_c_table),
- BCM590XX_REG_TABLE(camldo2, ldo_c_table),
- BCM590XX_REG_TABLE(simldo1, ldo_a_table),
- BCM590XX_REG_TABLE(simldo2, ldo_a_table),
- BCM590XX_REG_TABLE(sdldo, ldo_c_table),
- BCM590XX_REG_TABLE(sdxldo, ldo_a_table),
- BCM590XX_REG_TABLE(mmcldo1, ldo_a_table),
- BCM590XX_REG_TABLE(mmcldo2, ldo_a_table),
- BCM590XX_REG_TABLE(audldo, ldo_a_table),
- BCM590XX_REG_TABLE(micldo, ldo_a_table),
- BCM590XX_REG_TABLE(usbldo, ldo_a_table),
- BCM590XX_REG_TABLE(vibldo, ldo_c_table),
- BCM590XX_REG_RANGES(csr, dcdc_csr_ranges),
- BCM590XX_REG_RANGES(iosr1, dcdc_iosr1_ranges),
- BCM590XX_REG_RANGES(iosr2, dcdc_iosr1_ranges),
- BCM590XX_REG_RANGES(msr, dcdc_iosr1_ranges),
- BCM590XX_REG_RANGES(sdsr1, dcdc_sdsr1_ranges),
- BCM590XX_REG_RANGES(sdsr2, dcdc_iosr1_ranges),
- BCM590XX_REG_RANGES(vsr, dcdc_iosr1_ranges),
- BCM590XX_REG_TABLE(gpldo1, ldo_a_table),
- BCM590XX_REG_TABLE(gpldo2, ldo_a_table),
- BCM590XX_REG_TABLE(gpldo3, ldo_a_table),
- BCM590XX_REG_TABLE(gpldo4, ldo_a_table),
- BCM590XX_REG_TABLE(gpldo5, ldo_a_table),
- BCM590XX_REG_TABLE(gpldo6, ldo_a_table),
- BCM590XX_REG_TABLE(vbus, ldo_vbus),
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(AUDLDO, audldo, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(MICLDO, micldo, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(USBLDO, usbldo, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(VIBLDO, vibldo, ldo_c_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_SR_DESC(CSR, csr, dcdc_csr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_SR_DESC(IOSR1, iosr1, dcdc_iosr1_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_SR_DESC(IOSR2, iosr2, dcdc_iosr1_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_SR_DESC(MSR, msr, dcdc_iosr1_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_SR_DESC(SDSR1, sdsr1, dcdc_sdsr1_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_SR_DESC(SDSR2, sdsr2, dcdc_iosr1_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_SR_DESC(VSR, vsr, dcdc_iosr1_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59056_LDO_DESC(GPLDO1, gpldo1, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59056_LDO_DESC(GPLDO2, gpldo2, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59056_LDO_DESC(GPLDO3, gpldo3, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59056_LDO_DESC(GPLDO4, gpldo4, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59056_LDO_DESC(GPLDO5, gpldo5, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59056_LDO_DESC(GPLDO6, gpldo6, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_VBUS,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59056_REG_DESC(VBUS, vbus),
+ .ops = &bcm590xx_ops_vbus,
+ .n_voltages = 1,
+ .fixed_uV = 5000000,
+ .enable_reg = BCM59056_OTG_CTRL,
+ .enable_mask = BCM590XX_VBUS_ENABLE,
+ },
+ },
};
-struct bcm590xx_reg {
- struct regulator_desc *desc;
- struct bcm590xx *mfd;
+/* BCM59054 data */
+
+/* I2C slave 0 registers */
+#define BCM59054_RFLDOPMCTRL1 0x60
+#define BCM59054_CAMLDO1PMCTRL1 0x62
+#define BCM59054_CAMLDO2PMCTRL1 0x64
+#define BCM59054_SIMLDO1PMCTRL1 0x66
+#define BCM59054_SIMLDO2PMCTRL1 0x68
+#define BCM59054_SDLDOPMCTRL1 0x6a
+#define BCM59054_SDXLDOPMCTRL1 0x6c
+#define BCM59054_MMCLDO1PMCTRL1 0x6e
+#define BCM59054_MMCLDO2PMCTRL1 0x70
+#define BCM59054_AUDLDOPMCTRL1 0x72
+#define BCM59054_MICLDOPMCTRL1 0x74
+#define BCM59054_USBLDOPMCTRL1 0x76
+#define BCM59054_VIBLDOPMCTRL1 0x78
+#define BCM59054_IOSR1PMCTRL1 0x7a
+#define BCM59054_IOSR2PMCTRL1 0x7c
+#define BCM59054_CSRPMCTRL1 0x7e
+#define BCM59054_SDSR1PMCTRL1 0x82
+#define BCM59054_SDSR2PMCTRL1 0x86
+#define BCM59054_MMSRPMCTRL1 0x8a
+#define BCM59054_VSRPMCTRL1 0x8e
+#define BCM59054_RFLDOCTRL 0x96
+#define BCM59054_CAMLDO1CTRL 0x97
+#define BCM59054_CAMLDO2CTRL 0x98
+#define BCM59054_SIMLDO1CTRL 0x99
+#define BCM59054_SIMLDO2CTRL 0x9a
+#define BCM59054_SDLDOCTRL 0x9b
+#define BCM59054_SDXLDOCTRL 0x9c
+#define BCM59054_MMCLDO1CTRL 0x9d
+#define BCM59054_MMCLDO2CTRL 0x9e
+#define BCM59054_AUDLDOCTRL 0x9f
+#define BCM59054_MICLDOCTRL 0xa0
+#define BCM59054_USBLDOCTRL 0xa1
+#define BCM59054_VIBLDOCTRL 0xa2
+#define BCM59054_CSRVOUT1 0xc0
+#define BCM59054_IOSR1VOUT1 0xc3
+#define BCM59054_IOSR2VOUT1 0xc6
+#define BCM59054_MMSRVOUT1 0xc9
+#define BCM59054_SDSR1VOUT1 0xcc
+#define BCM59054_SDSR2VOUT1 0xcf
+#define BCM59054_VSRVOUT1 0xd2
+
+/* I2C slave 1 registers */
+#define BCM59054_LVLDO1PMCTRL1 0x16
+#define BCM59054_LVLDO2PMCTRL1 0x18
+#define BCM59054_GPLDO1CTRL 0x1a
+#define BCM59054_GPLDO2CTRL 0x1b
+#define BCM59054_GPLDO3CTRL 0x1c
+#define BCM59054_TCXLDOCTRL 0x1d
+#define BCM59054_LVLDO1CTRL 0x1e
+#define BCM59054_LVLDO2CTRL 0x1f
+#define BCM59054_OTG_CTRL 0x40
+#define BCM59054_GPLDO1PMCTRL1 0x57
+#define BCM59054_GPLDO2PMCTRL1 0x59
+#define BCM59054_GPLDO3PMCTRL1 0x5b
+#define BCM59054_TCXLDOPMCTRL1 0x5d
+
+/*
+ * RFLDO to VSR regulators are
+ * accessed via I2C slave 0
+ */
+
+/* LDO regulator IDs */
+#define BCM59054_REG_RFLDO 0
+#define BCM59054_REG_CAMLDO1 1
+#define BCM59054_REG_CAMLDO2 2
+#define BCM59054_REG_SIMLDO1 3
+#define BCM59054_REG_SIMLDO2 4
+#define BCM59054_REG_SDLDO 5
+#define BCM59054_REG_SDXLDO 6
+#define BCM59054_REG_MMCLDO1 7
+#define BCM59054_REG_MMCLDO2 8
+#define BCM59054_REG_AUDLDO 9
+#define BCM59054_REG_MICLDO 10
+#define BCM59054_REG_USBLDO 11
+#define BCM59054_REG_VIBLDO 12
+
+/* DCDC regulator IDs */
+#define BCM59054_REG_CSR 13
+#define BCM59054_REG_IOSR1 14
+#define BCM59054_REG_IOSR2 15
+#define BCM59054_REG_MMSR 16
+#define BCM59054_REG_SDSR1 17
+#define BCM59054_REG_SDSR2 18
+#define BCM59054_REG_VSR 19
+
+/*
+ * GPLDO1 to VBUS regulators are
+ * accessed via I2C slave 1
+ */
+
+#define BCM59054_REG_GPLDO1 20
+#define BCM59054_REG_GPLDO2 21
+#define BCM59054_REG_GPLDO3 22
+#define BCM59054_REG_TCXLDO 23
+#define BCM59054_REG_LVLDO1 24
+#define BCM59054_REG_LVLDO2 25
+#define BCM59054_REG_VBUS 26
+
+#define BCM59054_NUM_REGS 27
+
+/* LDO group 1: supported voltages in microvolts */
+static const unsigned int bcm59054_ldo_1_table[] = {
+ 1200000, 1800000, 2500000, 2700000, 2800000,
+ 2900000, 3000000, 3300000,
};
-static int bcm590xx_get_vsel_register(int id)
-{
- if (BCM590XX_REG_IS_LDO(id))
- return BCM590XX_RFLDOCTRL + id;
- else if (BCM590XX_REG_IS_GPLDO(id))
- return BCM590XX_GPLDO1CTRL + id;
- else
- return BCM590XX_CSRVOUT1 + (id - BCM590XX_REG_CSR) * 3;
-}
+/* LDO group 2: supported voltages in microvolts */
+static const unsigned int bcm59054_ldo_2_table[] = {
+ 3100000, 1800000, 2500000, 2700000, 2800000,
+ 2900000, 3000000, 3300000,
+};
-static int bcm590xx_get_enable_register(int id)
-{
- int reg = 0;
-
- if (BCM590XX_REG_IS_LDO(id))
- reg = BCM590XX_RFLDOPMCTRL1 + id * 2;
- else if (BCM590XX_REG_IS_GPLDO(id))
- reg = BCM590XX_GPLDO1PMCTRL1 + id * 2;
- else
- switch (id) {
- case BCM590XX_REG_CSR:
- reg = BCM590XX_CSRPMCTRL1;
- break;
- case BCM590XX_REG_IOSR1:
- reg = BCM590XX_IOSR1PMCTRL1;
- break;
- case BCM590XX_REG_IOSR2:
- reg = BCM590XX_IOSR2PMCTRL1;
- break;
- case BCM590XX_REG_MSR:
- reg = BCM590XX_MSRPMCTRL1;
- break;
- case BCM590XX_REG_SDSR1:
- reg = BCM590XX_SDSR1PMCTRL1;
- break;
- case BCM590XX_REG_SDSR2:
- reg = BCM590XX_SDSR2PMCTRL1;
- break;
- case BCM590XX_REG_VSR:
- reg = BCM590XX_VSRPMCTRL1;
- break;
- case BCM590XX_REG_VBUS:
- reg = BCM590XX_OTG_CTRL;
- break;
- }
+/* LDO group 3: supported voltages in microvolts */
+static const unsigned int bcm59054_ldo_3_table[] = {
+ 1000000, 1107000, 1143000, 1214000, 1250000,
+ 1464000, 1500000, 1786000,
+};
+/* DCDC group SR: supported voltages in microvolts */
+static const struct linear_range bcm59054_dcdc_sr_ranges[] = {
+ REGULATOR_LINEAR_RANGE(0, 0, 1, 0),
+ REGULATOR_LINEAR_RANGE(860000, 2, 60, 10000),
+ REGULATOR_LINEAR_RANGE(1500000, 61, 61, 0),
+ REGULATOR_LINEAR_RANGE(1800000, 62, 62, 0),
+ REGULATOR_LINEAR_RANGE(900000, 63, 63, 0),
+};
- return reg;
-}
+/* DCDC group VSR (BCM59054A1): supported voltages in microvolts */
+static const struct linear_range bcm59054_dcdc_vsr_a1_ranges[] = {
+ REGULATOR_LINEAR_RANGE(0, 0, 1, 0),
+ REGULATOR_LINEAR_RANGE(860000, 2, 59, 10000),
+ REGULATOR_LINEAR_RANGE(1700000, 60, 60, 0),
+ REGULATOR_LINEAR_RANGE(1500000, 61, 61, 0),
+ REGULATOR_LINEAR_RANGE(1800000, 62, 62, 0),
+ REGULATOR_LINEAR_RANGE(1600000, 63, 63, 0),
+};
-static const struct regulator_ops bcm590xx_ops_ldo = {
- .is_enabled = regulator_is_enabled_regmap,
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage_sel = regulator_set_voltage_sel_regmap,
- .list_voltage = regulator_list_voltage_table,
- .map_voltage = regulator_map_voltage_iterate,
+/* DCDC group CSR: supported voltages in microvolts */
+static const struct linear_range bcm59054_dcdc_csr_ranges[] = {
+ REGULATOR_LINEAR_RANGE(700000, 0, 1, 100000),
+ REGULATOR_LINEAR_RANGE(860000, 2, 60, 10000),
+ REGULATOR_LINEAR_RANGE(900000, 61, 63, 0),
};
-static const struct regulator_ops bcm590xx_ops_dcdc = {
- .is_enabled = regulator_is_enabled_regmap,
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage_sel = regulator_set_voltage_sel_regmap,
- .list_voltage = regulator_list_voltage_linear_range,
- .map_voltage = regulator_map_voltage_linear_range,
+static const struct bcm590xx_reg_data bcm59054_regs[BCM59054_NUM_REGS] = {
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(RFLDO, rfldo, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(CAMLDO1, camldo1, ldo_2_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(CAMLDO2, camldo2, ldo_2_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(SIMLDO1, simldo1, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(SIMLDO2, simldo2, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(SDLDO, sdldo, ldo_2_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(SDXLDO, sdxldo, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(MMCLDO1, mmcldo1, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(MMCLDO2, mmcldo2, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(AUDLDO, audldo, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_REG_DESC(MICLDO, micldo),
+ .ops = &bcm590xx_ops_ldo_novolt,
+ /* MICLDO is locked at 1.8V */
+ .n_voltages = 1,
+ .fixed_uV = 1800000,
+ .enable_reg = BCM59054_MICLDOPMCTRL1,
+ .enable_mask = BCM590XX_REG_ENABLE,
+ .enable_is_inverted = true,
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(USBLDO, usbldo, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(VIBLDO, vibldo, ldo_2_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(CSR, csr, dcdc_csr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(IOSR1, iosr1, dcdc_sr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(IOSR2, iosr2, dcdc_sr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(MMSR, mmsr, dcdc_sr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(SDSR1, sdsr1, dcdc_sr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(SDSR2, sdsr2, dcdc_sr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(VSR, vsr, dcdc_sr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_LDO_DESC(GPLDO1, gpldo1, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_LDO_DESC(GPLDO2, gpldo2, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_LDO_DESC(GPLDO3, gpldo3, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_LDO_DESC(TCXLDO, tcxldo, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_LDO_DESC(LVLDO1, lvldo1, ldo_3_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_LDO_DESC(LVLDO2, lvldo2, ldo_3_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_VBUS,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_REG_DESC(VBUS, vbus),
+ .ops = &bcm590xx_ops_vbus,
+ .n_voltages = 1,
+ .fixed_uV = 5000000,
+ .enable_reg = BCM59054_OTG_CTRL,
+ .enable_mask = BCM590XX_VBUS_ENABLE,
+ },
+ },
};
-static const struct regulator_ops bcm590xx_ops_vbus = {
- .is_enabled = regulator_is_enabled_regmap,
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
+/*
+ * BCM59054A1 regulators; same as previous revision, but with different
+ * VSR voltage table.
+ */
+static const struct bcm590xx_reg_data bcm59054_a1_regs[BCM59054_NUM_REGS] = {
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(RFLDO, rfldo, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(CAMLDO1, camldo1, ldo_2_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(CAMLDO2, camldo2, ldo_2_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(SIMLDO1, simldo1, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(SIMLDO2, simldo2, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(SDLDO, sdldo, ldo_2_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(SDXLDO, sdxldo, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(MMCLDO1, mmcldo1, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(MMCLDO2, mmcldo2, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(AUDLDO, audldo, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_REG_DESC(MICLDO, micldo),
+ .ops = &bcm590xx_ops_ldo_novolt,
+ /* MICLDO is locked at 1.8V */
+ .n_voltages = 1,
+ .fixed_uV = 1800000,
+ .enable_reg = BCM59054_MICLDOPMCTRL1,
+ .enable_mask = BCM590XX_REG_ENABLE,
+ .enable_is_inverted = true,
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(USBLDO, usbldo, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(VIBLDO, vibldo, ldo_2_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(CSR, csr, dcdc_csr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(IOSR1, iosr1, dcdc_sr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(IOSR2, iosr2, dcdc_sr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(MMSR, mmsr, dcdc_sr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(SDSR1, sdsr1, dcdc_sr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(SDSR2, sdsr2, dcdc_sr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(VSR, vsr, dcdc_vsr_a1_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_LDO_DESC(GPLDO1, gpldo1, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_LDO_DESC(GPLDO2, gpldo2, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_LDO_DESC(GPLDO3, gpldo3, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_LDO_DESC(TCXLDO, tcxldo, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_LDO_DESC(LVLDO1, lvldo1, ldo_3_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_LDO_DESC(LVLDO2, lvldo2, ldo_3_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_VBUS,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_REG_DESC(VBUS, vbus),
+ .ops = &bcm590xx_ops_vbus,
+ .n_voltages = 1,
+ .fixed_uV = 5000000,
+ .enable_reg = BCM59054_OTG_CTRL,
+ .enable_mask = BCM590XX_VBUS_ENABLE,
+ },
+ },
};
static int bcm590xx_probe(struct platform_device *pdev)
{
struct bcm590xx *bcm590xx = dev_get_drvdata(pdev->dev.parent);
struct bcm590xx_reg *pmu;
+ const struct bcm590xx_reg_data *info;
struct regulator_config config = { };
- struct bcm590xx_info *info;
struct regulator_dev *rdev;
- int i;
+ unsigned int i;
pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
if (!pmu)
@@ -287,65 +1110,53 @@ static int bcm590xx_probe(struct platform_device *pdev)
pmu->mfd = bcm590xx;
- platform_set_drvdata(pdev, pmu);
-
- pmu->desc = devm_kcalloc(&pdev->dev,
- BCM590XX_NUM_REGS,
- sizeof(struct regulator_desc),
- GFP_KERNEL);
- if (!pmu->desc)
- return -ENOMEM;
+ switch (pmu->mfd->pmu_id) {
+ case BCM590XX_PMUID_BCM59054:
+ pmu->n_regulators = BCM59054_NUM_REGS;
+ if (pmu->mfd->rev_analog == BCM59054_REV_ANALOG_A1)
+ pmu->regs = bcm59054_a1_regs;
+ else
+ pmu->regs = bcm59054_regs;
+ break;
+ case BCM590XX_PMUID_BCM59056:
+ pmu->n_regulators = BCM59056_NUM_REGS;
+ pmu->regs = bcm59056_regs;
+ break;
+ default:
+ dev_err(bcm590xx->dev,
+ "unknown device type, could not initialize\n");
+ return -EINVAL;
+ }
- info = bcm590xx_regs;
-
- for (i = 0; i < BCM590XX_NUM_REGS; i++, info++) {
- /* Register the regulators */
- pmu->desc[i].name = info->name;
- pmu->desc[i].of_match = of_match_ptr(info->name);
- pmu->desc[i].regulators_node = of_match_ptr("regulators");
- pmu->desc[i].supply_name = info->vin_name;
- pmu->desc[i].id = i;
- pmu->desc[i].volt_table = info->volt_table;
- pmu->desc[i].n_voltages = info->n_voltages;
- pmu->desc[i].linear_ranges = info->linear_ranges;
- pmu->desc[i].n_linear_ranges = info->n_linear_ranges;
-
- if ((BCM590XX_REG_IS_LDO(i)) || (BCM590XX_REG_IS_GPLDO(i))) {
- pmu->desc[i].ops = &bcm590xx_ops_ldo;
- pmu->desc[i].vsel_mask = BCM590XX_LDO_VSEL_MASK;
- } else if (BCM590XX_REG_IS_VBUS(i))
- pmu->desc[i].ops = &bcm590xx_ops_vbus;
- else {
- pmu->desc[i].ops = &bcm590xx_ops_dcdc;
- pmu->desc[i].vsel_mask = BCM590XX_SR_VSEL_MASK;
- }
+ platform_set_drvdata(pdev, pmu);
- if (BCM590XX_REG_IS_VBUS(i))
- pmu->desc[i].enable_mask = BCM590XX_VBUS_ENABLE;
- else {
- pmu->desc[i].vsel_reg = bcm590xx_get_vsel_register(i);
- pmu->desc[i].enable_is_inverted = true;
- pmu->desc[i].enable_mask = BCM590XX_REG_ENABLE;
- }
- pmu->desc[i].enable_reg = bcm590xx_get_enable_register(i);
- pmu->desc[i].type = REGULATOR_VOLTAGE;
- pmu->desc[i].owner = THIS_MODULE;
+ /* Register the regulators */
+ for (i = 0; i < pmu->n_regulators; i++) {
+ info = &pmu->regs[i];
config.dev = bcm590xx->dev;
config.driver_data = pmu;
- if (BCM590XX_REG_IS_GPLDO(i) || BCM590XX_REG_IS_VBUS(i))
- config.regmap = bcm590xx->regmap_sec;
- else
- config.regmap = bcm590xx->regmap_pri;
- rdev = devm_regulator_register(&pdev->dev, &pmu->desc[i],
- &config);
- if (IS_ERR(rdev)) {
+ switch (info->regmap) {
+ case BCM590XX_REGMAP_PRI:
+ config.regmap = bcm590xx->regmap_pri;
+ break;
+ case BCM590XX_REGMAP_SEC:
+ config.regmap = bcm590xx->regmap_sec;
+ break;
+ default:
dev_err(bcm590xx->dev,
- "failed to register %s regulator\n",
+ "invalid regmap for %s regulator; this is a driver bug\n",
pdev->name);
- return PTR_ERR(rdev);
+ return -EINVAL;
}
+
+ rdev = devm_regulator_register(&pdev->dev, &info->desc,
+ &config);
+ if (IS_ERR(rdev))
+ return dev_err_probe(bcm590xx->dev, PTR_ERR(rdev),
+ "failed to register %s regulator\n",
+ pdev->name);
}
return 0;
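Note that dev_err_probe() returns the error it is given and suppresses the message for -EPROBE_DEFER (recording it as a deferral reason instead), which is what lets the register-failure branch collapse into a single return statement.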
diff --git a/drivers/regulator/bd96801-regulator.c b/drivers/regulator/bd96801-regulator.c
index 3a9d772491a8..24d21172298b 100644
--- a/drivers/regulator/bd96801-regulator.c
+++ b/drivers/regulator/bd96801-regulator.c
@@ -83,6 +83,7 @@ enum {
#define BD96801_LDO6_VSEL_REG 0x26
#define BD96801_LDO7_VSEL_REG 0x27
#define BD96801_BUCK_VSEL_MASK 0x1F
+#define BD96805_BUCK_VSEL_MASK 0x3f
#define BD96801_LDO_VSEL_MASK 0xff
#define BD96801_MASK_RAMP_DELAY 0xc0
@@ -90,6 +91,7 @@ enum {
#define BD96801_BUCK_INT_VOUT_MASK 0xff
#define BD96801_BUCK_VOLTS 256
+#define BD96805_BUCK_VOLTS 64
#define BD96801_LDO_VOLTS 256
#define BD96801_OVP_MASK 0x03
@@ -160,6 +162,30 @@ static const struct linear_range bd96801_buck_init_volts[] = {
REGULATOR_LINEAR_RANGE(3300000 - 150000, 0xed, 0xff, 0),
};
+/* BD96802 uses the same buck voltage ranges as BD96801 */
+#define bd96802_tune_volts bd96801_tune_volts
+#define bd96802_buck_init_volts bd96801_buck_init_volts
+
+/*
+ * On BD96805 we have a similar "negative tuning range" to the one on BD96801,
+ * except that the maximum tuning is -310 ... +310 mV (instead of ±150 mV). We
+ * use the same approach as with the BD96801 ranges.
+ */
+static const struct linear_range bd96805_tune_volts[] = {
+ REGULATOR_LINEAR_RANGE(310000, 0x00, 0x1F, 10000),
+ REGULATOR_LINEAR_RANGE(0, 0x20, 0x3F, 10000),
+};
+
+static const struct linear_range bd96805_buck_init_volts[] = {
+ REGULATOR_LINEAR_RANGE(500000 - 310000, 0x00, 0xc8, 5000),
+ REGULATOR_LINEAR_RANGE(1550000 - 310000, 0xc9, 0xec, 50000),
+ REGULATOR_LINEAR_RANGE(3300000 - 310000, 0xed, 0xff, 0),
+};
+
+/* BD96806 uses the same buck voltage ranges as BD96805 */
+#define bd96806_tune_volts bd96805_tune_volts
+#define bd96806_buck_init_volts bd96805_buck_init_volts
+
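As a sanity check on the encoding (using the REGULATOR_LINEAR_RANGE semantics, voltage(sel) = min_uV + (sel - min_sel) * step_uV): in bd96805_tune_volts, selector 0x00 gives 310000 µV, selector 0x1F gives 310000 + 31 * 10000 = 620000 µV, and selectors 0x20..0x3F span 0..310000 µV, a 620 mV window in total, matching the ±310 mV tuning described above.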
static const struct linear_range bd96801_ldo_int_volts[] = {
REGULATOR_LINEAR_RANGE(300000, 0x00, 0x78, 25000),
REGULATOR_LINEAR_RANGE(3300000, 0x79, 0xff, 0),
@@ -198,89 +224,89 @@ struct bd96801_irqinfo {
static const struct bd96801_irqinfo buck1_irqinfo[] = {
BD96801_IRQINFO(BD96801_PROT_OCP, "buck1-over-curr-h", 500,
- "bd96801-buck1-overcurr-h"),
+ "buck1-overcurr-h"),
BD96801_IRQINFO(BD96801_PROT_OCP, "buck1-over-curr-l", 500,
- "bd96801-buck1-overcurr-l"),
+ "buck1-overcurr-l"),
BD96801_IRQINFO(BD96801_PROT_OCP, "buck1-over-curr-n", 500,
- "bd96801-buck1-overcurr-n"),
+ "buck1-overcurr-n"),
BD96801_IRQINFO(BD96801_PROT_OVP, "buck1-over-voltage", 500,
- "bd96801-buck1-overvolt"),
+ "buck1-overvolt"),
BD96801_IRQINFO(BD96801_PROT_UVP, "buck1-under-voltage", 500,
- "bd96801-buck1-undervolt"),
+ "buck1-undervolt"),
BD96801_IRQINFO(BD96801_PROT_TEMP, "buck1-over-temp", 500,
- "bd96801-buck1-thermal")
+ "buck1-thermal")
};
static const struct bd96801_irqinfo buck2_irqinfo[] = {
BD96801_IRQINFO(BD96801_PROT_OCP, "buck2-over-curr-h", 500,
- "bd96801-buck2-overcurr-h"),
+ "buck2-overcurr-h"),
BD96801_IRQINFO(BD96801_PROT_OCP, "buck2-over-curr-l", 500,
- "bd96801-buck2-overcurr-l"),
+ "buck2-overcurr-l"),
BD96801_IRQINFO(BD96801_PROT_OCP, "buck2-over-curr-n", 500,
- "bd96801-buck2-overcurr-n"),
+ "buck2-overcurr-n"),
BD96801_IRQINFO(BD96801_PROT_OVP, "buck2-over-voltage", 500,
- "bd96801-buck2-overvolt"),
+ "buck2-overvolt"),
BD96801_IRQINFO(BD96801_PROT_UVP, "buck2-under-voltage", 500,
- "bd96801-buck2-undervolt"),
+ "buck2-undervolt"),
BD96801_IRQINFO(BD96801_PROT_TEMP, "buck2-over-temp", 500,
- "bd96801-buck2-thermal")
+ "buck2-thermal")
};
static const struct bd96801_irqinfo buck3_irqinfo[] = {
BD96801_IRQINFO(BD96801_PROT_OCP, "buck3-over-curr-h", 500,
- "bd96801-buck3-overcurr-h"),
+ "buck3-overcurr-h"),
BD96801_IRQINFO(BD96801_PROT_OCP, "buck3-over-curr-l", 500,
- "bd96801-buck3-overcurr-l"),
+ "buck3-overcurr-l"),
BD96801_IRQINFO(BD96801_PROT_OCP, "buck3-over-curr-n", 500,
- "bd96801-buck3-overcurr-n"),
+ "buck3-overcurr-n"),
BD96801_IRQINFO(BD96801_PROT_OVP, "buck3-over-voltage", 500,
- "bd96801-buck3-overvolt"),
+ "buck3-overvolt"),
BD96801_IRQINFO(BD96801_PROT_UVP, "buck3-under-voltage", 500,
- "bd96801-buck3-undervolt"),
+ "buck3-undervolt"),
BD96801_IRQINFO(BD96801_PROT_TEMP, "buck3-over-temp", 500,
- "bd96801-buck3-thermal")
+ "buck3-thermal")
};
static const struct bd96801_irqinfo buck4_irqinfo[] = {
BD96801_IRQINFO(BD96801_PROT_OCP, "buck4-over-curr-h", 500,
- "bd96801-buck4-overcurr-h"),
+ "buck4-overcurr-h"),
BD96801_IRQINFO(BD96801_PROT_OCP, "buck4-over-curr-l", 500,
- "bd96801-buck4-overcurr-l"),
+ "buck4-overcurr-l"),
BD96801_IRQINFO(BD96801_PROT_OCP, "buck4-over-curr-n", 500,
- "bd96801-buck4-overcurr-n"),
+ "buck4-overcurr-n"),
BD96801_IRQINFO(BD96801_PROT_OVP, "buck4-over-voltage", 500,
- "bd96801-buck4-overvolt"),
+ "buck4-overvolt"),
BD96801_IRQINFO(BD96801_PROT_UVP, "buck4-under-voltage", 500,
- "bd96801-buck4-undervolt"),
+ "buck4-undervolt"),
BD96801_IRQINFO(BD96801_PROT_TEMP, "buck4-over-temp", 500,
- "bd96801-buck4-thermal")
+ "buck4-thermal")
};
static const struct bd96801_irqinfo ldo5_irqinfo[] = {
BD96801_IRQINFO(BD96801_PROT_OCP, "ldo5-overcurr", 500,
- "bd96801-ldo5-overcurr"),
+ "ldo5-overcurr"),
BD96801_IRQINFO(BD96801_PROT_OVP, "ldo5-over-voltage", 500,
- "bd96801-ldo5-overvolt"),
+ "ldo5-overvolt"),
BD96801_IRQINFO(BD96801_PROT_UVP, "ldo5-under-voltage", 500,
- "bd96801-ldo5-undervolt"),
+ "ldo5-undervolt"),
};
static const struct bd96801_irqinfo ldo6_irqinfo[] = {
BD96801_IRQINFO(BD96801_PROT_OCP, "ldo6-overcurr", 500,
- "bd96801-ldo6-overcurr"),
+ "ldo6-overcurr"),
BD96801_IRQINFO(BD96801_PROT_OVP, "ldo6-over-voltage", 500,
- "bd96801-ldo6-overvolt"),
+ "ldo6-overvolt"),
BD96801_IRQINFO(BD96801_PROT_UVP, "ldo6-under-voltage", 500,
- "bd96801-ldo6-undervolt"),
+ "ldo6-undervolt"),
};
static const struct bd96801_irqinfo ldo7_irqinfo[] = {
BD96801_IRQINFO(BD96801_PROT_OCP, "ldo7-overcurr", 500,
- "bd96801-ldo7-overcurr"),
+ "ldo7-overcurr"),
BD96801_IRQINFO(BD96801_PROT_OVP, "ldo7-over-voltage", 500,
- "bd96801-ldo7-overvolt"),
+ "ldo7-overvolt"),
BD96801_IRQINFO(BD96801_PROT_UVP, "ldo7-under-voltage", 500,
- "bd96801-ldo7-undervolt"),
+ "ldo7-undervolt"),
};
struct bd96801_irq_desc {
@@ -302,6 +328,7 @@ struct bd96801_pmic_data {
struct bd96801_regulator_data regulator_data[BD96801_NUM_REGULATORS];
struct regmap *regmap;
int fatal_ind;
+ int num_regulators;
};
static int ldo_map_notif(int irq, struct regulator_irq_data *rid,
@@ -503,6 +530,70 @@ static int bd96801_walk_regulator_dt(struct device *dev, struct regmap *regmap,
* case later. What we can easily do for preparing is to not use static global
* data for regulators though.
*/
+static const struct bd96801_pmic_data bd96802_data = {
+ .regulator_data = {
+ {
+ .desc = {
+ .name = "buck1",
+ .of_match = of_match_ptr("buck1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_BUCK1,
+ .ops = &bd96801_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96802_tune_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96802_tune_volts),
+ .n_voltages = BD96801_BUCK_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_BUCK1_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_BUCK1_VSEL_REG,
+ .vsel_mask = BD96801_BUCK_VSEL_MASK,
+ .ramp_reg = BD96801_BUCK1_VSEL_REG,
+ .ramp_mask = BD96801_MASK_RAMP_DELAY,
+ .ramp_delay_table = &buck_ramp_table[0],
+ .n_ramp_values = ARRAY_SIZE(buck_ramp_table),
+ .owner = THIS_MODULE,
+ },
+ .init_ranges = bd96802_buck_init_volts,
+ .num_ranges = ARRAY_SIZE(bd96802_buck_init_volts),
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&buck1_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(buck1_irqinfo),
+ },
+ },
+ {
+ .desc = {
+ .name = "buck2",
+ .of_match = of_match_ptr("buck2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_BUCK2,
+ .ops = &bd96801_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96802_tune_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96802_tune_volts),
+ .n_voltages = BD96801_BUCK_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_BUCK2_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_BUCK2_VSEL_REG,
+ .vsel_mask = BD96801_BUCK_VSEL_MASK,
+ .ramp_reg = BD96801_BUCK2_VSEL_REG,
+ .ramp_mask = BD96801_MASK_RAMP_DELAY,
+ .ramp_delay_table = &buck_ramp_table[0],
+ .n_ramp_values = ARRAY_SIZE(buck_ramp_table),
+ .owner = THIS_MODULE,
+ },
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&buck2_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(buck2_irqinfo),
+ },
+ .init_ranges = bd96802_buck_init_volts,
+ .num_ranges = ARRAY_SIZE(bd96802_buck_init_volts),
+ },
+ },
+ .num_regulators = 2,
+};
+
static const struct bd96801_pmic_data bd96801_data = {
.regulator_data = {
{
@@ -688,11 +779,265 @@ static const struct bd96801_pmic_data bd96801_data = {
.ldo_vol_lvl = BD96801_LDO7_VOL_LVL_REG,
},
},
+ .num_regulators = 7,
};
-static int initialize_pmic_data(struct device *dev,
+static const struct bd96801_pmic_data bd96805_data = {
+ .regulator_data = {
+ {
+ .desc = {
+ .name = "buck1",
+ .of_match = of_match_ptr("buck1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_BUCK1,
+ .ops = &bd96801_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96805_tune_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96805_tune_volts),
+ .n_voltages = BD96805_BUCK_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_BUCK1_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_BUCK1_VSEL_REG,
+ .vsel_mask = BD96805_BUCK_VSEL_MASK,
+ .ramp_reg = BD96801_BUCK1_VSEL_REG,
+ .ramp_mask = BD96801_MASK_RAMP_DELAY,
+ .ramp_delay_table = &buck_ramp_table[0],
+ .n_ramp_values = ARRAY_SIZE(buck_ramp_table),
+ .owner = THIS_MODULE,
+ },
+ .init_ranges = bd96805_buck_init_volts,
+ .num_ranges = ARRAY_SIZE(bd96805_buck_init_volts),
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&buck1_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(buck1_irqinfo),
+ },
+ }, {
+ .desc = {
+ .name = "buck2",
+ .of_match = of_match_ptr("buck2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_BUCK2,
+ .ops = &bd96801_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96805_tune_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96805_tune_volts),
+ .n_voltages = BD96805_BUCK_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_BUCK2_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_BUCK2_VSEL_REG,
+ .vsel_mask = BD96805_BUCK_VSEL_MASK,
+ .ramp_reg = BD96801_BUCK2_VSEL_REG,
+ .ramp_mask = BD96801_MASK_RAMP_DELAY,
+ .ramp_delay_table = &buck_ramp_table[0],
+ .n_ramp_values = ARRAY_SIZE(buck_ramp_table),
+ .owner = THIS_MODULE,
+ },
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&buck2_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(buck2_irqinfo),
+ },
+ .init_ranges = bd96805_buck_init_volts,
+ .num_ranges = ARRAY_SIZE(bd96805_buck_init_volts),
+ }, {
+ .desc = {
+ .name = "buck3",
+ .of_match = of_match_ptr("buck3"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_BUCK3,
+ .ops = &bd96801_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96805_tune_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96805_tune_volts),
+ .n_voltages = BD96805_BUCK_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_BUCK3_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_BUCK3_VSEL_REG,
+ .vsel_mask = BD96805_BUCK_VSEL_MASK,
+ .ramp_reg = BD96801_BUCK3_VSEL_REG,
+ .ramp_mask = BD96801_MASK_RAMP_DELAY,
+ .ramp_delay_table = &buck_ramp_table[0],
+ .n_ramp_values = ARRAY_SIZE(buck_ramp_table),
+ .owner = THIS_MODULE,
+ },
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&buck3_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(buck3_irqinfo),
+ },
+ .init_ranges = bd96805_buck_init_volts,
+ .num_ranges = ARRAY_SIZE(bd96805_buck_init_volts),
+ }, {
+ .desc = {
+ .name = "buck4",
+ .of_match = of_match_ptr("buck4"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_BUCK4,
+ .ops = &bd96801_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96805_tune_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96805_tune_volts),
+ .n_voltages = BD96805_BUCK_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_BUCK4_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_BUCK4_VSEL_REG,
+ .vsel_mask = BD96805_BUCK_VSEL_MASK,
+ .ramp_reg = BD96801_BUCK4_VSEL_REG,
+ .ramp_mask = BD96801_MASK_RAMP_DELAY,
+ .ramp_delay_table = &buck_ramp_table[0],
+ .n_ramp_values = ARRAY_SIZE(buck_ramp_table),
+ .owner = THIS_MODULE,
+ },
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&buck4_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(buck4_irqinfo),
+ },
+ .init_ranges = bd96805_buck_init_volts,
+ .num_ranges = ARRAY_SIZE(bd96805_buck_init_volts),
+ }, {
+ .desc = {
+ .name = "ldo5",
+ .of_match = of_match_ptr("ldo5"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_LDO5,
+ .ops = &bd96801_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96801_ldo_int_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96801_ldo_int_volts),
+ .n_voltages = BD96801_LDO_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_LDO5_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_LDO5_VSEL_REG,
+ .vsel_mask = BD96801_LDO_VSEL_MASK,
+ .owner = THIS_MODULE,
+ },
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&ldo5_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(ldo5_irqinfo),
+ },
+ .ldo_vol_lvl = BD96801_LDO5_VOL_LVL_REG,
+ }, {
+ .desc = {
+ .name = "ldo6",
+ .of_match = of_match_ptr("ldo6"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_LDO6,
+ .ops = &bd96801_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96801_ldo_int_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96801_ldo_int_volts),
+ .n_voltages = BD96801_LDO_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_LDO6_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_LDO6_VSEL_REG,
+ .vsel_mask = BD96801_LDO_VSEL_MASK,
+ .owner = THIS_MODULE,
+ },
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&ldo6_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(ldo6_irqinfo),
+ },
+ .ldo_vol_lvl = BD96801_LDO6_VOL_LVL_REG,
+ }, {
+ .desc = {
+ .name = "ldo7",
+ .of_match = of_match_ptr("ldo7"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_LDO7,
+ .ops = &bd96801_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96801_ldo_int_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96801_ldo_int_volts),
+ .n_voltages = BD96801_LDO_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_LDO7_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_LDO7_VSEL_REG,
+ .vsel_mask = BD96801_LDO_VSEL_MASK,
+ .owner = THIS_MODULE,
+ },
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&ldo7_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(ldo7_irqinfo),
+ },
+ .ldo_vol_lvl = BD96801_LDO7_VOL_LVL_REG,
+ },
+ },
+ .num_regulators = 7,
+};
+
+static const struct bd96801_pmic_data bd96806_data = {
+ .regulator_data = {
+ {
+ .desc = {
+ .name = "buck1",
+ .of_match = of_match_ptr("buck1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_BUCK1,
+ .ops = &bd96801_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96806_tune_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96806_tune_volts),
+ .n_voltages = BD96805_BUCK_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_BUCK1_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_BUCK1_VSEL_REG,
+ .vsel_mask = BD96805_BUCK_VSEL_MASK,
+ .ramp_reg = BD96801_BUCK1_VSEL_REG,
+ .ramp_mask = BD96801_MASK_RAMP_DELAY,
+ .ramp_delay_table = &buck_ramp_table[0],
+ .n_ramp_values = ARRAY_SIZE(buck_ramp_table),
+ .owner = THIS_MODULE,
+ },
+ .init_ranges = bd96806_buck_init_volts,
+ .num_ranges = ARRAY_SIZE(bd96806_buck_init_volts),
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&buck1_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(buck1_irqinfo),
+ },
+ },
+ {
+ .desc = {
+ .name = "buck2",
+ .of_match = of_match_ptr("buck2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_BUCK2,
+ .ops = &bd96801_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96806_tune_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96806_tune_volts),
+ .n_voltages = BD96805_BUCK_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_BUCK2_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_BUCK2_VSEL_REG,
+ .vsel_mask = BD96805_BUCK_VSEL_MASK,
+ .ramp_reg = BD96801_BUCK2_VSEL_REG,
+ .ramp_mask = BD96801_MASK_RAMP_DELAY,
+ .ramp_delay_table = &buck_ramp_table[0],
+ .n_ramp_values = ARRAY_SIZE(buck_ramp_table),
+ .owner = THIS_MODULE,
+ },
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&buck2_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(buck2_irqinfo),
+ },
+ .init_ranges = bd96806_buck_init_volts,
+ .num_ranges = ARRAY_SIZE(bd96806_buck_init_volts),
+ },
+ },
+ .num_regulators = 2,
+};
+
+static int initialize_pmic_data(struct platform_device *pdev,
struct bd96801_pmic_data *pdata)
{
+ struct device *dev = &pdev->dev;
int r, i;
/*
@@ -700,7 +1045,7 @@ static int initialize_pmic_data(struct device *dev,
* wish to modify IRQ information independently for each driver
* instance.
*/
- for (r = 0; r < BD96801_NUM_REGULATORS; r++) {
+ for (r = 0; r < pdata->num_regulators; r++) {
const struct bd96801_irqinfo *template;
struct bd96801_irqinfo *new;
int num_infos;
@@ -741,8 +1086,7 @@ static int bd96801_rdev_errb_irqs(struct platform_device *pdev,
int i;
void *retp;
static const char * const single_out_errb_irqs[] = {
- "bd96801-%s-pvin-err", "bd96801-%s-ovp-err",
- "bd96801-%s-uvp-err", "bd96801-%s-shdn-err",
+ "%s-pvin-err", "%s-ovp-err", "%s-uvp-err", "%s-shdn-err",
};
for (i = 0; i < ARRAY_SIZE(single_out_errb_irqs); i++) {
@@ -779,12 +1123,10 @@ static int bd96801_global_errb_irqs(struct platform_device *pdev,
int i, num_irqs;
void *retp;
static const char * const global_errb_irqs[] = {
- "bd96801-otp-err", "bd96801-dbist-err", "bd96801-eep-err",
- "bd96801-abist-err", "bd96801-prstb-err", "bd96801-drmoserr1",
- "bd96801-drmoserr2", "bd96801-slave-err", "bd96801-vref-err",
- "bd96801-tsd", "bd96801-uvlo-err", "bd96801-ovlo-err",
- "bd96801-osc-err", "bd96801-pon-err", "bd96801-poff-err",
- "bd96801-cmd-shdn-err", "bd96801-int-shdn-err"
+ "otp-err", "dbist-err", "eep-err", "abist-err", "prstb-err",
+ "drmoserr1", "drmoserr2", "slave-err", "vref-err", "tsd",
+ "uvlo-err", "ovlo-err", "osc-err", "pon-err", "poff-err",
+ "cmd-shdn-err", "int-shdn-err"
};
num_irqs = ARRAY_SIZE(global_errb_irqs);
@@ -869,6 +1211,7 @@ static int bd96801_probe(struct platform_device *pdev)
{
struct regulator_dev *ldo_errs_rdev_arr[BD96801_NUM_LDOS];
struct regulator_dev *all_rdevs[BD96801_NUM_REGULATORS];
+ struct bd96801_pmic_data *pdata_template;
struct bd96801_regulator_data *rdesc;
struct regulator_config config = {};
int ldo_errs_arr[BD96801_NUM_LDOS];
@@ -881,12 +1224,16 @@ static int bd96801_probe(struct platform_device *pdev)
parent = pdev->dev.parent;
- pdata = devm_kmemdup(&pdev->dev, &bd96801_data, sizeof(bd96801_data),
+ pdata_template = (struct bd96801_pmic_data *)platform_get_device_id(pdev)->driver_data;
+ if (!pdata_template)
+ return -ENODEV;
+
+ pdata = devm_kmemdup(&pdev->dev, pdata_template, sizeof(bd96801_data),
GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- if (initialize_pmic_data(&pdev->dev, pdata))
+ if (initialize_pmic_data(pdev, pdata))
return -ENOMEM;
pdata->regmap = dev_get_regmap(parent, NULL);
@@ -909,11 +1256,11 @@ static int bd96801_probe(struct platform_device *pdev)
use_errb = true;
ret = bd96801_walk_regulator_dt(&pdev->dev, pdata->regmap, rdesc,
- BD96801_NUM_REGULATORS);
+ pdata->num_regulators);
if (ret)
return ret;
- for (i = 0; i < ARRAY_SIZE(pdata->regulator_data); i++) {
+ for (i = 0; i < pdata->num_regulators; i++) {
struct regulator_dev *rdev;
struct bd96801_irq_desc *idesc = &rdesc[i].irq_desc;
int j;
@@ -926,6 +1273,7 @@ static int bd96801_probe(struct platform_device *pdev)
rdesc[i].desc.name);
return PTR_ERR(rdev);
}
+
all_rdevs[i] = rdev;
/*
* LDOs don't have own temperature monitoring. If temperature
@@ -956,12 +1304,12 @@ static int bd96801_probe(struct platform_device *pdev)
if (temp_notif_ldos) {
int irq;
struct regulator_irq_desc tw_desc = {
- .name = "bd96801-core-thermal",
+ .name = "core-thermal",
.irq_off_ms = 500,
.map_event = ldo_map_notif,
};
- irq = platform_get_irq_byname(pdev, "bd96801-core-thermal");
+ irq = platform_get_irq_byname(pdev, "core-thermal");
if (irq < 0)
return irq;
@@ -975,14 +1323,17 @@ static int bd96801_probe(struct platform_device *pdev)
if (use_errb)
return bd96801_global_errb_irqs(pdev, all_rdevs,
- ARRAY_SIZE(all_rdevs));
+ pdata->num_regulators);
return 0;
}
static const struct platform_device_id bd96801_pmic_id[] = {
- { "bd96801-regulator", },
- { }
+ { "bd96801-regulator", (kernel_ulong_t)&bd96801_data },
+ { "bd96802-regulator", (kernel_ulong_t)&bd96802_data },
+ { "bd96805-regulator", (kernel_ulong_t)&bd96805_data },
+ { "bd96806-regulator", (kernel_ulong_t)&bd96806_data },
+ { },
};
MODULE_DEVICE_TABLE(platform, bd96801_pmic_id);
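+
+/*
+ * Note: each ID entry above carries its variant's const bd96801_pmic_data
+ * in driver_data. The probe path duplicates that template with
+ * devm_kmemdup() before use, so per-instance IRQ information can be
+ * adjusted without modifying the shared const tables.
+ */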
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index 5ff4e2fee4ab..1c7598b8475d 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -36,7 +36,7 @@ obj-$(CONFIG_RCAR_REMOTEPROC) += rcar_rproc.o
obj-$(CONFIG_ST_REMOTEPROC) += st_remoteproc.o
obj-$(CONFIG_ST_SLIM_REMOTEPROC) += st_slim_rproc.o
obj-$(CONFIG_STM32_RPROC) += stm32_rproc.o
-obj-$(CONFIG_TI_K3_DSP_REMOTEPROC) += ti_k3_dsp_remoteproc.o
-obj-$(CONFIG_TI_K3_M4_REMOTEPROC) += ti_k3_m4_remoteproc.o
-obj-$(CONFIG_TI_K3_R5_REMOTEPROC) += ti_k3_r5_remoteproc.o
+obj-$(CONFIG_TI_K3_DSP_REMOTEPROC) += ti_k3_dsp_remoteproc.o ti_k3_common.o
+obj-$(CONFIG_TI_K3_M4_REMOTEPROC) += ti_k3_m4_remoteproc.o ti_k3_common.o
+obj-$(CONFIG_TI_K3_R5_REMOTEPROC) += ti_k3_r5_remoteproc.o ti_k3_common.o
obj-$(CONFIG_XLNX_R5_REMOTEPROC) += xlnx_r5_remoteproc.o
diff --git a/drivers/remoteproc/imx_dsp_rproc.c b/drivers/remoteproc/imx_dsp_rproc.c
index 90cb1fc13e71..5ee622bf5352 100644
--- a/drivers/remoteproc/imx_dsp_rproc.c
+++ b/drivers/remoteproc/imx_dsp_rproc.c
@@ -36,9 +36,18 @@ module_param_named(no_mailboxes, no_mailboxes, int, 0644);
MODULE_PARM_DESC(no_mailboxes,
"There is no mailbox between cores, so ignore remote proc reply after start, default is 0 (off).");
+/* Flag indicating that the remote is up and running */
#define REMOTE_IS_READY BIT(0)
+/* Flag indicating that the host should wait for a firmware-ready response */
+#define WAIT_FW_READY BIT(1)
#define REMOTE_READY_WAIT_MAX_RETRIES 500
+/*
+ * This flag is set in the DSP resource table's features field to indicate
+ * that the firmware requires the host NOT to wait for a FW_READY response.
+ */
+#define FEATURE_DONT_WAIT_FW_READY BIT(0)
+
/* att flags */
/* DSP own area */
#define ATT_OWN BIT(31)
@@ -73,6 +82,10 @@ MODULE_PARM_DESC(no_mailboxes,
#define IMX8ULP_SIP_HIFI_XRDC 0xc200000e
+#define FW_RSC_NXP_S_MAGIC ((uint32_t)'n' << 24 | \
+ (uint32_t)'x' << 16 | \
+ (uint32_t)'p' << 8 | \
+ (uint32_t)'s')
/*
* enum - Predefined Mailbox Messages
*
@@ -139,6 +152,24 @@ struct imx_dsp_rproc_dcfg {
int (*reset)(struct imx_dsp_rproc *priv);
};
+/**
+ * struct fw_rsc_imx_dsp - i.MX DSP specific info
+ *
+ * @len: length of the resource entry
+ * @magic_num: 32-bit magic number
+ * @version: version of data structure
+ * @features: feature flags supported by the i.MX DSP firmware
+ *
+ * This represents a DSP-specific resource in the firmware's
+ * resource table, providing information on supported features.
+ */
+struct fw_rsc_imx_dsp {
+ uint32_t len;
+ uint32_t magic_num;
+ uint32_t version;
+ uint32_t features;
+} __packed;
+
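+/*
+ * Illustrative sketch only (not part of this driver): a firmware that
+ * wants to skip the FW_READY handshake would publish a resource entry
+ * shaped roughly like this, using the magic and feature bits above.
+ */
+#if 0
+static const struct fw_rsc_imx_dsp example_dsp_rsc = {
+	.len		= sizeof(struct fw_rsc_imx_dsp),
+	.magic_num	= FW_RSC_NXP_S_MAGIC,
+	.version	= 0,
+	.features	= FEATURE_DONT_WAIT_FW_READY,
+};
+#endif
+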
static const struct imx_rproc_att imx_dsp_rproc_att_imx8qm[] = {
/* dev addr , sys addr , size , flags */
{ 0x596e8000, 0x556e8000, 0x00008000, ATT_OWN },
@@ -297,6 +328,66 @@ static int imx_dsp_rproc_ready(struct rproc *rproc)
return -ETIMEDOUT;
}
+/**
+ * imx_dsp_rproc_handle_rsc() - Handle DSP-specific resource table entries
+ * @rproc: remote processor instance
+ * @rsc_type: resource type identifier
+ * @rsc: pointer to the resource entry
+ * @offset: offset of the resource entry
+ * @avail: available space in the resource table
+ *
+ * Parse the DSP-specific resource entry and update flags accordingly.
+ * If the WAIT_FW_READY feature is set, the host must wait for the firmware
+ * to signal readiness before proceeding with execution.
+ *
+ * Return: RSC_HANDLED if processed successfully, RSC_IGNORED otherwise.
+ */
+static int imx_dsp_rproc_handle_rsc(struct rproc *rproc, u32 rsc_type,
+ void *rsc, int offset, int avail)
+{
+ struct imx_dsp_rproc *priv = rproc->priv;
+ struct fw_rsc_imx_dsp *imx_dsp_rsc = rsc;
+ struct device *dev = rproc->dev.parent;
+
+ if (!imx_dsp_rsc) {
+ dev_dbg(dev, "Invalid fw_rsc_imx_dsp.\n");
+ return RSC_IGNORED;
+ }
+
+ /* Make sure resource isn't truncated */
+ if (sizeof(struct fw_rsc_imx_dsp) > avail ||
+ sizeof(struct fw_rsc_imx_dsp) != imx_dsp_rsc->len) {
+ dev_dbg(dev, "Resource fw_rsc_imx_dsp is truncated.\n");
+ return RSC_IGNORED;
+ }
+
+ /*
+ * If the FW_RSC_NXP_S_MAGIC number is not found, fall back to
+ * waiting for the fw_ready reply (the default workflow).
+ */
+ if (imx_dsp_rsc->magic_num != FW_RSC_NXP_S_MAGIC) {
+ dev_dbg(dev, "Invalid resource table magic number.\n");
+ return RSC_IGNORED;
+ }
+
+ /*
+ * For now, in struct fw_rsc_imx_dsp, version 0,
+ * only FEATURE_DONT_WAIT_FW_READY is valid.
+ *
+ * When adding new features, please upgrade version.
+ */
+ if (imx_dsp_rsc->version > 0) {
+ dev_warn(dev, "Unexpected fw_rsc_imx_dsp version %d.\n",
+ imx_dsp_rsc->version);
+ return RSC_IGNORED;
+ }
+
+ if (imx_dsp_rsc->features & FEATURE_DONT_WAIT_FW_READY)
+ priv->flags &= ~WAIT_FW_READY;
+
+ return RSC_HANDLED;
+}
+
/*
* Start function for rproc_ops
*
@@ -335,8 +426,8 @@ static int imx_dsp_rproc_start(struct rproc *rproc)
if (ret)
dev_err(dev, "Failed to enable remote core!\n");
- else
- ret = imx_dsp_rproc_ready(rproc);
+ else if (priv->flags & WAIT_FW_READY)
+ return imx_dsp_rproc_ready(rproc);
return ret;
}
@@ -939,6 +1030,7 @@ static const struct rproc_ops imx_dsp_rproc_ops = {
.kick = imx_dsp_rproc_kick,
.load = imx_dsp_rproc_elf_load_segments,
.parse_fw = imx_dsp_rproc_parse_fw,
+ .handle_rsc = imx_dsp_rproc_handle_rsc,
.find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
.sanity_check = rproc_elf_sanity_check,
.get_boot_addr = rproc_elf_get_boot_addr,
@@ -1058,6 +1150,8 @@ static int imx_dsp_rproc_probe(struct platform_device *pdev)
priv = rproc->priv;
priv->rproc = rproc;
priv->dsp_dcfg = dsp_dcfg;
+ /* By default, host waits for fw_ready reply */
+ priv->flags |= WAIT_FW_READY;
if (no_mailboxes)
imx_dsp_rproc_mbox_init = imx_dsp_rproc_mbox_no_alloc;
diff --git a/drivers/remoteproc/qcom_wcnss_iris.c b/drivers/remoteproc/qcom_wcnss_iris.c
index b989718776bd..2b52b403eb3f 100644
--- a/drivers/remoteproc/qcom_wcnss_iris.c
+++ b/drivers/remoteproc/qcom_wcnss_iris.c
@@ -196,6 +196,7 @@ struct qcom_iris *qcom_iris_probe(struct device *parent, bool *use_48mhz_xo)
err_device_del:
device_del(&iris->dev);
+ put_device(&iris->dev);
return ERR_PTR(ret);
}
@@ -203,4 +204,5 @@ err_device_del:
void qcom_iris_remove(struct qcom_iris *iris)
{
device_del(&iris->dev);
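+	/*
+	 * device_del() only unregisters the device; also drop the reference
+	 * taken at initialization so the device can actually be freed.
+	 */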
+ put_device(&iris->dev);
}
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index b21eedefff87..81b2ccf988e8 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -1617,7 +1617,7 @@ static int rproc_attach(struct rproc *rproc)
ret = rproc_set_rsc_table(rproc);
if (ret) {
dev_err(dev, "can't load resource table: %d\n", ret);
- goto unprepare_device;
+ goto clean_up_resources;
}
/* reset max_notifyid */
@@ -1634,7 +1634,7 @@ static int rproc_attach(struct rproc *rproc)
ret = rproc_handle_resources(rproc, rproc_loading_handlers);
if (ret) {
dev_err(dev, "Failed to process resources: %d\n", ret);
- goto unprepare_device;
+ goto clean_up_resources;
}
/* Allocate carveout resources associated to rproc */
@@ -1653,9 +1653,9 @@ static int rproc_attach(struct rproc *rproc)
clean_up_resources:
rproc_resource_cleanup(rproc);
-unprepare_device:
/* release HW resources if needed */
rproc_unprepare_device(rproc);
+ kfree(rproc->clean_table);
disable_iommu:
rproc_disable_iommu(rproc);
return ret;
@@ -2025,7 +2025,6 @@ int rproc_shutdown(struct rproc *rproc)
kfree(rproc->cached_table);
rproc->cached_table = NULL;
rproc->table_ptr = NULL;
- rproc->table_sz = 0;
out:
mutex_unlock(&rproc->lock);
return ret;
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
index b02b36a3f515..431648607d53 100644
--- a/drivers/remoteproc/stm32_rproc.c
+++ b/drivers/remoteproc/stm32_rproc.c
@@ -835,6 +835,7 @@ static int stm32_rproc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct stm32_rproc *ddata;
struct device_node *np = dev->of_node;
+ const char *fw_name;
struct rproc *rproc;
unsigned int state;
int ret;
@@ -843,7 +844,12 @@ static int stm32_rproc_probe(struct platform_device *pdev)
if (ret)
return ret;
- rproc = devm_rproc_alloc(dev, np->name, &st_rproc_ops, NULL, sizeof(*ddata));
+ /* Look for an optional firmware name; -EINVAL just means the property is absent */
+ ret = rproc_of_parse_firmware(dev, 0, &fw_name);
+ if (ret < 0 && ret != -EINVAL)
+ return ret;
+
+ rproc = devm_rproc_alloc(dev, np->name, &st_rproc_ops, fw_name, sizeof(*ddata));
if (!rproc)
return -ENOMEM;
diff --git a/drivers/remoteproc/ti_k3_common.c b/drivers/remoteproc/ti_k3_common.c
new file mode 100644
index 000000000000..d5dccc81d460
--- /dev/null
+++ b/drivers/remoteproc/ti_k3_common.c
@@ -0,0 +1,551 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI K3 Remote Processor(s) driver common code
+ *
+ * Refactored out of ti_k3_r5_remoteproc.c, ti_k3_dsp_remoteproc.c and
+ * ti_k3_m4_remoteproc.c.
+ *
+ * ti_k3_r5_remoteproc.c:
+ * Copyright (C) 2017-2022 Texas Instruments Incorporated - https://www.ti.com/
+ * Suman Anna <s-anna@ti.com>
+ *
+ * ti_k3_dsp_remoteproc.c:
+ * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
+ * Suman Anna <s-anna@ti.com>
+ *
+ * ti_k3_m4_remoteproc.c:
+ * Copyright (C) 2021-2024 Texas Instruments Incorporated - https://www.ti.com/
+ * Hari Nagalla <hnagalla@ti.com>
+ */
+
+#include <linux/io.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/omap-mailbox.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include "omap_remoteproc.h"
+#include "remoteproc_internal.h"
+#include "ti_sci_proc.h"
+#include "ti_k3_common.h"
+
+/**
+ * k3_rproc_mbox_callback() - inbound mailbox message handler
+ * @client: mailbox client pointer used for requesting the mailbox channel
+ * @data: mailbox payload
+ *
+ * This handler is invoked by the K3 mailbox driver whenever a mailbox
+ * message is received. Usually, the mailbox payload simply contains
+ * the index of the virtqueue that is kicked by the remote processor,
+ * and we let remoteproc core handle it.
+ *
+ * In addition to virtqueue indices, we also have some out-of-band values
+ * that indicate different events. Those values are deliberately very
+ * large so they don't coincide with virtqueue indices.
+ */
+void k3_rproc_mbox_callback(struct mbox_client *client, void *data)
+{
+ struct k3_rproc *kproc = container_of(client, struct k3_rproc, client);
+ struct device *dev = kproc->rproc->dev.parent;
+ struct rproc *rproc = kproc->rproc;
+ u32 msg = (u32)(uintptr_t)(data);
+
+ dev_dbg(dev, "mbox msg: 0x%x\n", msg);
+
+ switch (msg) {
+ case RP_MBOX_CRASH:
+ /*
+ * remoteproc detected an exception, but error recovery is not
+ * supported. So, just log this for now
+ */
+ dev_err(dev, "K3 rproc %s crashed\n", rproc->name);
+ break;
+ case RP_MBOX_ECHO_REPLY:
+ dev_info(dev, "received echo reply from %s\n", rproc->name);
+ break;
+ default:
+ /* silently handle all other valid messages */
+ if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
+ return;
+ if (msg > rproc->max_notifyid) {
+ dev_dbg(dev, "dropping unknown message 0x%x", msg);
+ return;
+ }
+ /* msg contains the index of the triggered vring */
+ if (rproc_vq_interrupt(rproc, msg) == IRQ_NONE)
+ dev_dbg(dev, "no message was found in vqid %d\n", msg);
+ }
+}
+EXPORT_SYMBOL_GPL(k3_rproc_mbox_callback);
+
+/*
+ * Kick the remote processor to notify about pending unprocessed messages.
+ * The vqid is not used and is inconsequential: the kick is performed
+ * through a simulated GPIO (a bit in an IPC interrupt-triggering register),
+ * and the remote processor is expected to process both its Tx and Rx
+ * virtqueues.
+ */
+void k3_rproc_kick(struct rproc *rproc, int vqid)
+{
+ struct k3_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ u32 msg = (u32)vqid;
+ int ret;
+
+ /*
+ * Send the index of the triggered virtqueue in the mailbox payload.
+ * NOTE: msg is cast to uintptr_t to prevent compiler warnings when
+ * void* is 64bit. It is safely cast back to u32 in the mailbox driver.
+ */
+ ret = mbox_send_message(kproc->mbox, (void *)(uintptr_t)msg);
+ if (ret < 0)
+ dev_err(dev, "failed to send mailbox message, status = %d\n",
+ ret);
+}
+EXPORT_SYMBOL_GPL(k3_rproc_kick);
+
+/* Put the remote processor into reset */
+int k3_rproc_reset(struct k3_rproc *kproc)
+{
+ struct device *dev = kproc->dev;
+ int ret;
+
+ if (kproc->data->uses_lreset) {
+ ret = reset_control_assert(kproc->reset);
+ if (ret)
+ dev_err(dev, "local-reset assert failed (%pe)\n", ERR_PTR(ret));
+ } else {
+ ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret)
+ dev_err(dev, "module-reset assert failed (%pe)\n", ERR_PTR(ret));
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_rproc_reset);
+
+/* Release the remote processor from reset */
+int k3_rproc_release(struct k3_rproc *kproc)
+{
+ struct device *dev = kproc->dev;
+ int ret;
+
+ if (kproc->data->uses_lreset) {
+ ret = reset_control_deassert(kproc->reset);
+ if (ret) {
+ dev_err(dev, "local-reset deassert failed, (%pe)\n", ERR_PTR(ret));
+ if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id))
+ dev_warn(dev, "module-reset assert back failed\n");
+ }
+ } else {
+ ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret)
+ dev_err(dev, "module-reset deassert failed (%pe)\n", ERR_PTR(ret));
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_rproc_release);
+
+int k3_rproc_request_mbox(struct rproc *rproc)
+{
+ struct k3_rproc *kproc = rproc->priv;
+ struct mbox_client *client = &kproc->client;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ client->dev = dev;
+ client->tx_done = NULL;
+ client->rx_callback = k3_rproc_mbox_callback;
+ client->tx_block = false;
+ client->knows_txdone = false;
+
+ kproc->mbox = mbox_request_channel(client, 0);
+ if (IS_ERR(kproc->mbox))
+ return dev_err_probe(dev, PTR_ERR(kproc->mbox),
+ "mbox_request_channel failed\n");
+
+ /*
+ * Ping the remote processor. This is only a sanity check for now;
+ * there is no functional effect whatsoever.
+ *
+ * Note that the reply will _not_ arrive immediately: this message
+ * will wait in the mailbox fifo until the remote processor is booted.
+ */
+ ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
+ if (ret < 0) {
+ dev_err(dev, "mbox_send_message failed (%pe)\n", ERR_PTR(ret));
+ mbox_free_channel(kproc->mbox);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(k3_rproc_request_mbox);
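+
+/*
+ * The echo request queued above pairs with k3_rproc_mbox_callback():
+ * once the remote core boots and drains its mailbox FIFO, it answers
+ * with RP_MBOX_ECHO_REPLY, which the callback logs via dev_info().
+ */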
+
+/*
+ * The K3 DSP and M4 cores have a local reset that affects only the CPU, and a
+ * generic module reset that powers on the device and allows the internal
+ * memories to be accessed while the local reset is asserted. This function is
+ * used to release the global reset on remote cores to allow loading into the
+ * internal RAMs. The .prepare() ops is invoked by remoteproc core before any
+ * firmware loading, and is followed by the .start() ops after loading to
+ * actually let the remote cores run.
+ */
+int k3_rproc_prepare(struct rproc *rproc)
+{
+ struct k3_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ /* If the core is already running, there is no need to deassert the module reset */
+ if (rproc->state == RPROC_DETACHED)
+ return 0;
+
+ /*
+ * Ensure the local reset is asserted so the core doesn't
+ * execute bogus code when the module reset is released.
+ */
+ if (kproc->data->uses_lreset) {
+ ret = k3_rproc_reset(kproc);
+ if (ret)
+ return ret;
+
+ ret = reset_control_status(kproc->reset);
+ if (ret <= 0) {
+ dev_err(dev, "local reset still not asserted\n");
+ return ret;
+ }
+ }
+
+ ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret) {
+ dev_err(dev, "could not deassert module-reset for internal RAM loading\n");
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(k3_rproc_prepare);
+
+/*
+ * This function implements the .unprepare() ops and performs the complementary
+ * operations to that of the .prepare() ops. The function is used to assert the
+ * global reset on applicable K3 DSP and M4 cores. This completes the second
+ * portion of powering down the remote core. The cores themselves are only
+ * halted in the .stop() callback through the local reset, and the .unprepare()
+ * ops is invoked by the remoteproc core after the remoteproc is stopped to
+ * balance the global reset.
+ */
+int k3_rproc_unprepare(struct rproc *rproc)
+{
+ struct k3_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ /* If the core is going to be detached, do not assert the module reset */
+ if (rproc->state == RPROC_DETACHED)
+ return 0;
+
+ ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret) {
+ dev_err(dev, "module-reset assert failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(k3_rproc_unprepare);
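+
+/*
+ * For cores with a local reset (uses_lreset), the helpers in this file
+ * compose into the following remoteproc-mode sequence (sketch):
+ *
+ *	k3_rproc_prepare()   - deassert module reset, internal RAMs accessible
+ *	(remoteproc core loads firmware into the internal RAMs)
+ *	k3_rproc_start()     - deassert local reset, core starts executing
+ *	k3_rproc_stop()      - assert local reset, core halts
+ *	k3_rproc_unprepare() - assert module reset, balance the power-up
+ */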
+
+/*
+ * Power up the remote processor.
+ *
+ * This function will be invoked only after the firmware for this rproc
+ * was loaded, parsed successfully, and all of its resource requirements
+ * were met. This callback is invoked only in remoteproc mode.
+ */
+int k3_rproc_start(struct rproc *rproc)
+{
+ struct k3_rproc *kproc = rproc->priv;
+
+ return k3_rproc_release(kproc);
+}
+EXPORT_SYMBOL_GPL(k3_rproc_start);
+
+/*
+ * Stop the remote processor.
+ *
+ * This function puts the remote processor into reset, and finishes processing
+ * of any pending messages. This callback is invoked only in remoteproc mode.
+ */
+int k3_rproc_stop(struct rproc *rproc)
+{
+ struct k3_rproc *kproc = rproc->priv;
+
+ return k3_rproc_reset(kproc);
+}
+EXPORT_SYMBOL_GPL(k3_rproc_stop);
+
+/*
+ * Attach to a running remote processor (IPC-only mode)
+ *
+ * The rproc attach callback is a NOP. The remote processor is already booted,
+ * and all required resources have been acquired during the probe routine, so there
+ * is no need to issue any TI-SCI commands to boot the remote cores in IPC-only
+ * mode. This callback is invoked only in IPC-only mode and exists because
+ * rproc_validate() checks for its existence.
+ */
+int k3_rproc_attach(struct rproc *rproc) { return 0; }
+EXPORT_SYMBOL_GPL(k3_rproc_attach);
+
+/*
+ * Detach from a running remote processor (IPC-only mode)
+ *
+ * The rproc detach callback is a NOP. The remote processor is not stopped
+ * and will be left running its booted firmware. This callback is invoked
+ * only in IPC-only mode and exists for sanity's sake.
+ */
+int k3_rproc_detach(struct rproc *rproc) { return 0; }
+EXPORT_SYMBOL_GPL(k3_rproc_detach);
+
+/*
+ * This function implements the .get_loaded_rsc_table() callback and is used
+ * to provide the resource table for a booted remote processor in IPC-only
+ * mode. The remote processor firmwares follow a design-by-contract approach
+ * and are expected to have the resource table at the base of the DDR region
+ * reserved for firmware usage. This provides flexibility for the remote
+ * processor to be booted by different bootloaders that may or may not have the
+ * ability to publish the resource table address and size through a DT
+ * property.
+ */
+struct resource_table *k3_get_loaded_rsc_table(struct rproc *rproc,
+ size_t *rsc_table_sz)
+{
+ struct k3_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+
+ if (!kproc->rmem[0].cpu_addr) {
+ dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /*
+ * NOTE: The resource table size is currently hard-coded to a maximum
+ * of 256 bytes. The most common resource table usage for K3 firmwares
+ * is to only have the vdev resource entry and an optional trace entry.
+ * The exact size could be computed based on resource table address, but
+ * the hard-coded value suffices to support the IPC-only mode.
+ */
+ *rsc_table_sz = 256;
+ return (__force struct resource_table *)kproc->rmem[0].cpu_addr;
+}
+EXPORT_SYMBOL_GPL(k3_get_loaded_rsc_table);
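+
+/*
+ * Assumed layout of the first DDR carveout (design-by-contract, never
+ * probed at runtime): the resource table occupies the start of rmem[0],
+ * capped here at 256 bytes; anything past that is private to the
+ * firmware image.
+ */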
+
+/*
+ * Custom function to translate a remote processor device address (internal
+ * RAMs only) to a kernel virtual address. The remote processors can access
+ * their RAMs at either an internal address visible only from a remote
+ * processor, or at the SoC-level bus address. Both these addresses need to be
+ * looked through for translation. The translated addresses can be used either
+ * by the remoteproc core for loading (when using kernel remoteproc loader), or
+ * by any rpmsg bus drivers.
+ */
+void *k3_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
+{
+ struct k3_rproc *kproc = rproc->priv;
+ void __iomem *va = NULL;
+ phys_addr_t bus_addr;
+ u32 dev_addr, offset;
+ size_t size;
+ int i;
+
+ if (len == 0)
+ return NULL;
+
+ for (i = 0; i < kproc->num_mems; i++) {
+ bus_addr = kproc->mem[i].bus_addr;
+ dev_addr = kproc->mem[i].dev_addr;
+ size = kproc->mem[i].size;
+
+ /* handle rproc-view addresses */
+ if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
+ offset = da - dev_addr;
+ va = kproc->mem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+
+ /* handle SoC-view addresses */
+ if (da >= bus_addr && (da + len) <= (bus_addr + size)) {
+ offset = da - bus_addr;
+ va = kproc->mem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+ }
+
+ /* handle static DDR reserved memory regions */
+ for (i = 0; i < kproc->num_rmems; i++) {
+ dev_addr = kproc->rmem[i].dev_addr;
+ size = kproc->rmem[i].size;
+
+ if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
+ offset = da - dev_addr;
+ va = kproc->rmem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(k3_rproc_da_to_va);
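+
+/*
+ * Worked example with hypothetical values: for an entry with
+ * dev_addr = 0x0, bus_addr = 0x5c000000 and size = SZ_1M, a lookup of
+ * da = 0x800 matches the rproc view while da = 0x5c000800 matches the
+ * SoC view; both yield kproc->mem[i].cpu_addr + 0x800.
+ */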
+
+int k3_rproc_of_get_memories(struct platform_device *pdev,
+ struct k3_rproc *kproc)
+{
+ const struct k3_rproc_dev_data *data = kproc->data;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int num_mems = 0;
+ int i;
+
+ num_mems = data->num_mems;
+ kproc->mem = devm_kcalloc(kproc->dev, num_mems,
+ sizeof(*kproc->mem), GFP_KERNEL);
+ if (!kproc->mem)
+ return -ENOMEM;
+
+ for (i = 0; i < num_mems; i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ data->mems[i].name);
+ if (!res) {
+ dev_err(dev, "found no memory resource for %s\n",
+ data->mems[i].name);
+ return -EINVAL;
+ }
+ if (!devm_request_mem_region(dev, res->start,
+ resource_size(res),
+ dev_name(dev))) {
+ dev_err(dev, "could not request %s region for resource\n",
+ data->mems[i].name);
+ return -EBUSY;
+ }
+
+ kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
+ resource_size(res));
+ if (!kproc->mem[i].cpu_addr) {
+ dev_err(dev, "failed to map %s memory\n",
+ data->mems[i].name);
+ return -ENOMEM;
+ }
+ kproc->mem[i].bus_addr = res->start;
+ kproc->mem[i].dev_addr = data->mems[i].dev_addr;
+ kproc->mem[i].size = resource_size(res);
+
+ dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ data->mems[i].name, &kproc->mem[i].bus_addr,
+ kproc->mem[i].size, kproc->mem[i].cpu_addr,
+ kproc->mem[i].dev_addr);
+ }
+ kproc->num_mems = num_mems;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(k3_rproc_of_get_memories);
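+
+/*
+ * Hypothetical per-SoC memory table consumed above (names and device
+ * addresses are illustrative only):
+ *
+ *	static const struct k3_rproc_mem_data example_mems[] = {
+ *		{ .name = "l2sram", .dev_addr = 0x00800000 },
+ *		{ .name = "l1dram", .dev_addr = 0x00f00000 },
+ *	};
+ *
+ * Each named entry must have a matching IORESOURCE_MEM resource on the
+ * platform device.
+ */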
+
+void k3_mem_release(void *data)
+{
+ struct device *dev = data;
+
+ of_reserved_mem_device_release(dev);
+}
+EXPORT_SYMBOL_GPL(k3_mem_release);
+
+int k3_reserved_mem_init(struct k3_rproc *kproc)
+{
+ struct device *dev = kproc->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *rmem_np;
+ struct reserved_mem *rmem;
+ int num_rmems;
+ int ret, i;
+
+ num_rmems = of_property_count_elems_of_size(np, "memory-region",
+ sizeof(phandle));
+ if (num_rmems < 0) {
+ dev_err(dev, "device does not reserved memory regions (%d)\n",
+ num_rmems);
+ return -EINVAL;
+ }
+ if (num_rmems < 2) {
+ dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
+ num_rmems);
+ return -EINVAL;
+ }
+
+ /* use reserved memory region 0 for vring DMA allocations */
+ ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
+ if (ret) {
+ dev_err(dev, "device cannot initialize DMA pool (%d)\n", ret);
+ return ret;
+ }
+ ret = devm_add_action_or_reset(dev, k3_mem_release, dev);
+ if (ret)
+ return ret;
+
+ num_rmems--;
+ kproc->rmem = devm_kcalloc(dev, num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
+ if (!kproc->rmem)
+ return -ENOMEM;
+
+ /* use remaining reserved memory regions for static carveouts */
+ for (i = 0; i < num_rmems; i++) {
+ rmem_np = of_parse_phandle(np, "memory-region", i + 1);
+ if (!rmem_np)
+ return -EINVAL;
+
+ rmem = of_reserved_mem_lookup(rmem_np);
+ of_node_put(rmem_np);
+ if (!rmem)
+ return -EINVAL;
+
+ kproc->rmem[i].bus_addr = rmem->base;
+ /* 64-bit address regions currently not supported */
+ kproc->rmem[i].dev_addr = (u32)rmem->base;
+ kproc->rmem[i].size = rmem->size;
+ kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
+ if (!kproc->rmem[i].cpu_addr) {
+ dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
+ i + 1, &rmem->base, &rmem->size);
+ return -ENOMEM;
+ }
+
+ dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ i + 1, &kproc->rmem[i].bus_addr,
+ kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
+ kproc->rmem[i].dev_addr);
+ }
+ kproc->num_rmems = num_rmems;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(k3_reserved_mem_init);
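+
+/*
+ * Device-tree sketch matching the contract above (node and label names
+ * are hypothetical): the first memory-region phandle backs the vring
+ * DMA pool, the remaining phandles become static carveouts.
+ *
+ *	remoteproc@0 {
+ *		memory-region = <&rproc_dma_memory>, <&rproc_carveout_memory>;
+ *	};
+ */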
+
+void k3_release_tsp(void *data)
+{
+ struct ti_sci_proc *tsp = data;
+
+ ti_sci_proc_release(tsp);
+}
+EXPORT_SYMBOL_GPL(k3_release_tsp);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TI K3 common Remoteproc code");
diff --git a/drivers/remoteproc/ti_k3_common.h b/drivers/remoteproc/ti_k3_common.h
new file mode 100644
index 000000000000..aee3c28dbe51
--- /dev/null
+++ b/drivers/remoteproc/ti_k3_common.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * TI K3 Remote Processor(s) driver common code
+ *
+ * Refactored out of ti_k3_r5_remoteproc.c, ti_k3_dsp_remoteproc.c and
+ * ti_k3_m4_remoteproc.c.
+ *
+ * ti_k3_r5_remoteproc.c:
+ * Copyright (C) 2017-2022 Texas Instruments Incorporated - https://www.ti.com/
+ * Suman Anna <s-anna@ti.com>
+ *
+ * ti_k3_dsp_remoteproc.c:
+ * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
+ * Suman Anna <s-anna@ti.com>
+ *
+ * ti_k3_m4_remoteproc.c:
+ * Copyright (C) 2021-2024 Texas Instruments Incorporated - https://www.ti.com/
+ * Hari Nagalla <hnagalla@ti.com>
+ */
+
+#ifndef REMOTEPROC_TI_K3_COMMON_H
+#define REMOTEPROC_TI_K3_COMMON_H
+
+#define KEYSTONE_RPROC_LOCAL_ADDRESS_MASK (SZ_16M - 1)
+
+/**
+ * struct k3_rproc_mem - internal memory structure
+ * @cpu_addr: MPU virtual address of the memory region
+ * @bus_addr: Bus address used to access the memory region
+ * @dev_addr: Device address of the memory region from remote processor view
+ * @size: Size of the memory region
+ */
+struct k3_rproc_mem {
+ void __iomem *cpu_addr;
+ phys_addr_t bus_addr;
+ u32 dev_addr;
+ size_t size;
+};
+
+/**
+ * struct k3_rproc_mem_data - memory definitions for a remote processor
+ * @name: name for this memory entry
+ * @dev_addr: device address for the memory entry
+ */
+struct k3_rproc_mem_data {
+ const char *name;
+ const u32 dev_addr;
+};
+
+/**
+ * struct k3_rproc_dev_data - device data structure for a remote processor
+ * @mems: pointer to memory definitions for a remote processor
+ * @num_mems: number of memory regions in @mems
+ * @boot_align_addr: boot vector address alignment granularity
+ * @uses_lreset: flag to denote the need for local reset management
+ */
+struct k3_rproc_dev_data {
+ const struct k3_rproc_mem_data *mems;
+ u32 num_mems;
+ u32 boot_align_addr;
+ bool uses_lreset;
+};
+
+/**
+ * struct k3_rproc - k3 remote processor driver structure
+ * @dev: cached device pointer
+ * @rproc: remoteproc device handle
+ * @mem: internal memory regions data
+ * @num_mems: number of internal memory regions
+ * @rmem: reserved memory regions data
+ * @num_rmems: number of reserved memory regions
+ * @reset: reset control handle
+ * @data: pointer to device data specific to this remote processor
+ * @tsp: TI-SCI processor control handle
+ * @ti_sci: TI-SCI handle
+ * @ti_sci_id: TI-SCI device identifier
+ * @mbox: mailbox channel handle
+ * @client: mailbox client to request the mailbox channel
+ * @priv: void pointer to carry any private data
+ */
+struct k3_rproc {
+ struct device *dev;
+ struct rproc *rproc;
+ struct k3_rproc_mem *mem;
+ int num_mems;
+ struct k3_rproc_mem *rmem;
+ int num_rmems;
+ struct reset_control *reset;
+ const struct k3_rproc_dev_data *data;
+ struct ti_sci_proc *tsp;
+ const struct ti_sci_handle *ti_sci;
+ u32 ti_sci_id;
+ struct mbox_chan *mbox;
+ struct mbox_client client;
+ void *priv;
+};
+
+void k3_rproc_mbox_callback(struct mbox_client *client, void *data);
+void k3_rproc_kick(struct rproc *rproc, int vqid);
+int k3_rproc_reset(struct k3_rproc *kproc);
+int k3_rproc_release(struct k3_rproc *kproc);
+int k3_rproc_request_mbox(struct rproc *rproc);
+int k3_rproc_prepare(struct rproc *rproc);
+int k3_rproc_unprepare(struct rproc *rproc);
+int k3_rproc_start(struct rproc *rproc);
+int k3_rproc_stop(struct rproc *rproc);
+int k3_rproc_attach(struct rproc *rproc);
+int k3_rproc_detach(struct rproc *rproc);
+struct resource_table *k3_get_loaded_rsc_table(struct rproc *rproc,
+ size_t *rsc_table_sz);
+void *k3_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len,
+ bool *is_iomem);
+int k3_rproc_of_get_memories(struct platform_device *pdev,
+ struct k3_rproc *kproc);
+void k3_mem_release(void *data);
+int k3_reserved_mem_init(struct k3_rproc *kproc);
+void k3_release_tsp(void *data);
+#endif /* REMOTEPROC_TI_K3_COMMON_H */
diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
index a695890254ff..7a72933bd403 100644
--- a/drivers/remoteproc/ti_k3_dsp_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
@@ -20,291 +20,7 @@
#include "omap_remoteproc.h"
#include "remoteproc_internal.h"
#include "ti_sci_proc.h"
-
-#define KEYSTONE_RPROC_LOCAL_ADDRESS_MASK (SZ_16M - 1)
-
-/**
- * struct k3_dsp_mem - internal memory structure
- * @cpu_addr: MPU virtual address of the memory region
- * @bus_addr: Bus address used to access the memory region
- * @dev_addr: Device address of the memory region from DSP view
- * @size: Size of the memory region
- */
-struct k3_dsp_mem {
- void __iomem *cpu_addr;
- phys_addr_t bus_addr;
- u32 dev_addr;
- size_t size;
-};
-
-/**
- * struct k3_dsp_mem_data - memory definitions for a DSP
- * @name: name for this memory entry
- * @dev_addr: device address for the memory entry
- */
-struct k3_dsp_mem_data {
- const char *name;
- const u32 dev_addr;
-};
-
-/**
- * struct k3_dsp_dev_data - device data structure for a DSP
- * @mems: pointer to memory definitions for a DSP
- * @num_mems: number of memory regions in @mems
- * @boot_align_addr: boot vector address alignment granularity
- * @uses_lreset: flag to denote the need for local reset management
- */
-struct k3_dsp_dev_data {
- const struct k3_dsp_mem_data *mems;
- u32 num_mems;
- u32 boot_align_addr;
- bool uses_lreset;
-};
-
-/**
- * struct k3_dsp_rproc - k3 DSP remote processor driver structure
- * @dev: cached device pointer
- * @rproc: remoteproc device handle
- * @mem: internal memory regions data
- * @num_mems: number of internal memory regions
- * @rmem: reserved memory regions data
- * @num_rmems: number of reserved memory regions
- * @reset: reset control handle
- * @data: pointer to DSP-specific device data
- * @tsp: TI-SCI processor control handle
- * @ti_sci: TI-SCI handle
- * @ti_sci_id: TI-SCI device identifier
- * @mbox: mailbox channel handle
- * @client: mailbox client to request the mailbox channel
- */
-struct k3_dsp_rproc {
- struct device *dev;
- struct rproc *rproc;
- struct k3_dsp_mem *mem;
- int num_mems;
- struct k3_dsp_mem *rmem;
- int num_rmems;
- struct reset_control *reset;
- const struct k3_dsp_dev_data *data;
- struct ti_sci_proc *tsp;
- const struct ti_sci_handle *ti_sci;
- u32 ti_sci_id;
- struct mbox_chan *mbox;
- struct mbox_client client;
-};
-
-/**
- * k3_dsp_rproc_mbox_callback() - inbound mailbox message handler
- * @client: mailbox client pointer used for requesting the mailbox channel
- * @data: mailbox payload
- *
- * This handler is invoked by the OMAP mailbox driver whenever a mailbox
- * message is received. Usually, the mailbox payload simply contains
- * the index of the virtqueue that is kicked by the remote processor,
- * and we let remoteproc core handle it.
- *
- * In addition to virtqueue indices, we also have some out-of-band values
- * that indicate different events. Those values are deliberately very
- * large so they don't coincide with virtqueue indices.
- */
-static void k3_dsp_rproc_mbox_callback(struct mbox_client *client, void *data)
-{
- struct k3_dsp_rproc *kproc = container_of(client, struct k3_dsp_rproc,
- client);
- struct device *dev = kproc->rproc->dev.parent;
- const char *name = kproc->rproc->name;
- u32 msg = omap_mbox_message(data);
-
- /* Do not forward messages from a detached core */
- if (kproc->rproc->state == RPROC_DETACHED)
- return;
-
- dev_dbg(dev, "mbox msg: 0x%x\n", msg);
-
- switch (msg) {
- case RP_MBOX_CRASH:
- /*
- * remoteproc detected an exception, but error recovery is not
- * supported. So, just log this for now
- */
- dev_err(dev, "K3 DSP rproc %s crashed\n", name);
- break;
- case RP_MBOX_ECHO_REPLY:
- dev_info(dev, "received echo reply from %s\n", name);
- break;
- default:
- /* silently handle all other valid messages */
- if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
- return;
- if (msg > kproc->rproc->max_notifyid) {
- dev_dbg(dev, "dropping unknown message 0x%x", msg);
- return;
- }
- /* msg contains the index of the triggered vring */
- if (rproc_vq_interrupt(kproc->rproc, msg) == IRQ_NONE)
- dev_dbg(dev, "no message was found in vqid %d\n", msg);
- }
-}
-
-/*
- * Kick the remote processor to notify about pending unprocessed messages.
- * The vqid usage is not used and is inconsequential, as the kick is performed
- * through a simulated GPIO (a bit in an IPC interrupt-triggering register),
- * the remote processor is expected to process both its Tx and Rx virtqueues.
- */
-static void k3_dsp_rproc_kick(struct rproc *rproc, int vqid)
-{
- struct k3_dsp_rproc *kproc = rproc->priv;
- struct device *dev = rproc->dev.parent;
- mbox_msg_t msg = (mbox_msg_t)vqid;
- int ret;
-
- /* Do not forward messages to a detached core */
- if (kproc->rproc->state == RPROC_DETACHED)
- return;
-
- /* send the index of the triggered virtqueue in the mailbox payload */
- ret = mbox_send_message(kproc->mbox, (void *)msg);
- if (ret < 0)
- dev_err(dev, "failed to send mailbox message (%pe)\n",
- ERR_PTR(ret));
-}
-
-/* Put the DSP processor into reset */
-static int k3_dsp_rproc_reset(struct k3_dsp_rproc *kproc)
-{
- struct device *dev = kproc->dev;
- int ret;
-
- ret = reset_control_assert(kproc->reset);
- if (ret) {
- dev_err(dev, "local-reset assert failed (%pe)\n", ERR_PTR(ret));
- return ret;
- }
-
- if (kproc->data->uses_lreset)
- return ret;
-
- ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
- kproc->ti_sci_id);
- if (ret) {
- dev_err(dev, "module-reset assert failed (%pe)\n", ERR_PTR(ret));
- if (reset_control_deassert(kproc->reset))
- dev_warn(dev, "local-reset deassert back failed\n");
- }
-
- return ret;
-}
-
-/* Release the DSP processor from reset */
-static int k3_dsp_rproc_release(struct k3_dsp_rproc *kproc)
-{
- struct device *dev = kproc->dev;
- int ret;
-
- if (kproc->data->uses_lreset)
- goto lreset;
-
- ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
- kproc->ti_sci_id);
- if (ret) {
- dev_err(dev, "module-reset deassert failed (%pe)\n", ERR_PTR(ret));
- return ret;
- }
-
-lreset:
- ret = reset_control_deassert(kproc->reset);
- if (ret) {
- dev_err(dev, "local-reset deassert failed, (%pe)\n", ERR_PTR(ret));
- if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
- kproc->ti_sci_id))
- dev_warn(dev, "module-reset assert back failed\n");
- }
-
- return ret;
-}
-
-static int k3_dsp_rproc_request_mbox(struct rproc *rproc)
-{
- struct k3_dsp_rproc *kproc = rproc->priv;
- struct mbox_client *client = &kproc->client;
- struct device *dev = kproc->dev;
- int ret;
-
- client->dev = dev;
- client->tx_done = NULL;
- client->rx_callback = k3_dsp_rproc_mbox_callback;
- client->tx_block = false;
- client->knows_txdone = false;
-
- kproc->mbox = mbox_request_channel(client, 0);
- if (IS_ERR(kproc->mbox))
- return dev_err_probe(dev, PTR_ERR(kproc->mbox),
- "mbox_request_channel failed\n");
-
- /*
- * Ping the remote processor, this is only for sanity-sake for now;
- * there is no functional effect whatsoever.
- *
- * Note that the reply will _not_ arrive immediately: this message
- * will wait in the mailbox fifo until the remote processor is booted.
- */
- ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
- if (ret < 0) {
- dev_err(dev, "mbox_send_message failed (%pe)\n", ERR_PTR(ret));
- mbox_free_channel(kproc->mbox);
- return ret;
- }
-
- return 0;
-}
-/*
- * The C66x DSP cores have a local reset that affects only the CPU, and a
- * generic module reset that powers on the device and allows the DSP internal
- * memories to be accessed while the local reset is asserted. This function is
- * used to release the global reset on C66x DSPs to allow loading into the DSP
- * internal RAMs. The .prepare() ops is invoked by remoteproc core before any
- * firmware loading, and is followed by the .start() ops after loading to
- * actually let the C66x DSP cores run. This callback is invoked only in
- * remoteproc mode.
- */
-static int k3_dsp_rproc_prepare(struct rproc *rproc)
-{
- struct k3_dsp_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
- int ret;
-
- ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
- kproc->ti_sci_id);
- if (ret)
- dev_err(dev, "module-reset deassert failed, cannot enable internal RAM loading (%pe)\n",
- ERR_PTR(ret));
-
- return ret;
-}
-
-/*
- * This function implements the .unprepare() ops and performs the complimentary
- * operations to that of the .prepare() ops. The function is used to assert the
- * global reset on applicable C66x cores. This completes the second portion of
- * powering down the C66x DSP cores. The cores themselves are only halted in the
- * .stop() callback through the local reset, and the .unprepare() ops is invoked
- * by the remoteproc core after the remoteproc is stopped to balance the global
- * reset. This callback is invoked only in remoteproc mode.
- */
-static int k3_dsp_rproc_unprepare(struct rproc *rproc)
-{
- struct k3_dsp_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
- int ret;
-
- ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
- kproc->ti_sci_id);
- if (ret)
- dev_err(dev, "module-reset assert failed (%pe)\n", ERR_PTR(ret));
-
- return ret;
-}
+#include "ti_k3_common.h"
/*
* Power up the DSP remote processor.
@@ -315,7 +31,7 @@ static int k3_dsp_rproc_unprepare(struct rproc *rproc)
*/
static int k3_dsp_rproc_start(struct rproc *rproc)
{
- struct k3_dsp_rproc *kproc = rproc->priv;
+ struct k3_rproc *kproc = rproc->priv;
struct device *dev = kproc->dev;
u32 boot_addr;
int ret;
@@ -332,288 +48,30 @@ static int k3_dsp_rproc_start(struct rproc *rproc)
if (ret)
return ret;
- ret = k3_dsp_rproc_release(kproc);
+ /* Call the common K3 start function after the DSP-specific setup */
+ ret = k3_rproc_start(rproc);
if (ret)
return ret;
return 0;
}
-/*
- * Stop the DSP remote processor.
- *
- * This function puts the DSP processor into reset, and finishes processing
- * of any pending messages. This callback is invoked only in remoteproc mode.
- */
-static int k3_dsp_rproc_stop(struct rproc *rproc)
-{
- struct k3_dsp_rproc *kproc = rproc->priv;
-
- k3_dsp_rproc_reset(kproc);
-
- return 0;
-}
-
-/*
- * Attach to a running DSP remote processor (IPC-only mode)
- *
- * This rproc attach callback is a NOP. The remote processor is already booted,
- * and all required resources have been acquired during probe routine, so there
- * is no need to issue any TI-SCI commands to boot the DSP core. This callback
- * is invoked only in IPC-only mode and exists because rproc_validate() checks
- * for its existence.
- */
-static int k3_dsp_rproc_attach(struct rproc *rproc) { return 0; }
-
-/*
- * Detach from a running DSP remote processor (IPC-only mode)
- *
- * This rproc detach callback is a NOP. The DSP core is not stopped and will be
- * left to continue to run its booted firmware. This callback is invoked only in
- * IPC-only mode and exists for sanity sake.
- */
-static int k3_dsp_rproc_detach(struct rproc *rproc) { return 0; }
-
-/*
- * This function implements the .get_loaded_rsc_table() callback and is used
- * to provide the resource table for a booted DSP in IPC-only mode. The K3 DSP
- * firmwares follow a design-by-contract approach and are expected to have the
- * resource table at the base of the DDR region reserved for firmware usage.
- * This provides flexibility for the remote processor to be booted by different
- * bootloaders that may or may not have the ability to publish the resource table
- * address and size through a DT property. This callback is invoked only in
- * IPC-only mode.
- */
-static struct resource_table *k3_dsp_get_loaded_rsc_table(struct rproc *rproc,
- size_t *rsc_table_sz)
-{
- struct k3_dsp_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
-
- if (!kproc->rmem[0].cpu_addr) {
- dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found");
- return ERR_PTR(-ENOMEM);
- }
-
- /*
- * NOTE: The resource table size is currently hard-coded to a maximum
- * of 256 bytes. The most common resource table usage for K3 firmwares
- * is to only have the vdev resource entry and an optional trace entry.
- * The exact size could be computed based on resource table address, but
- * the hard-coded value suffices to support the IPC-only mode.
- */
- *rsc_table_sz = 256;
- return (__force struct resource_table *)kproc->rmem[0].cpu_addr;
-}
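
To make the contract in the NOTE concrete, the firmware is expected to place a layout like the following at the base of its first DDR carveout (entry count and offsets are illustrative, not taken from a real firmware image):

/*
 * Illustrative layout; actual entries depend on the firmware build:
 *
 *   base + 0x00: struct resource_table    (ver = 1, num = 2)
 *   base + 0x10: u32 offset[2]            (offsets of the two entries)
 *   base + 0x18: fw_rsc_hdr + fw_rsc_vdev   (rpmsg virtio device + vrings)
 *   base + ...:  fw_rsc_hdr + fw_rsc_trace  (optional trace buffer)
 *
 * which comfortably fits within the hard-coded 256-byte window.
 */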
-
-/*
- * Custom function to translate a DSP device address (internal RAMs only) to a
- * kernel virtual address. The DSPs can access their RAMs at either an internal
- * address visible only from a DSP, or at the SoC-level bus address. Both these
- * addresses need to be looked through for translation. The translated addresses
- * can be used either by the remoteproc core for loading (when using kernel
- * remoteproc loader), or by any rpmsg bus drivers.
- */
-static void *k3_dsp_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
-{
- struct k3_dsp_rproc *kproc = rproc->priv;
- void __iomem *va = NULL;
- phys_addr_t bus_addr;
- u32 dev_addr, offset;
- size_t size;
- int i;
-
- if (len == 0)
- return NULL;
-
- for (i = 0; i < kproc->num_mems; i++) {
- bus_addr = kproc->mem[i].bus_addr;
- dev_addr = kproc->mem[i].dev_addr;
- size = kproc->mem[i].size;
-
- if (da < KEYSTONE_RPROC_LOCAL_ADDRESS_MASK) {
- /* handle DSP-view addresses */
- if (da >= dev_addr &&
- ((da + len) <= (dev_addr + size))) {
- offset = da - dev_addr;
- va = kproc->mem[i].cpu_addr + offset;
- return (__force void *)va;
- }
- } else {
- /* handle SoC-view addresses */
- if (da >= bus_addr &&
- (da + len) <= (bus_addr + size)) {
- offset = da - bus_addr;
- va = kproc->mem[i].cpu_addr + offset;
- return (__force void *)va;
- }
- }
- }
-
- /* handle static DDR reserved memory regions */
- for (i = 0; i < kproc->num_rmems; i++) {
- dev_addr = kproc->rmem[i].dev_addr;
- size = kproc->rmem[i].size;
-
- if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
- offset = da - dev_addr;
- va = kproc->rmem[i].cpu_addr + offset;
- return (__force void *)va;
- }
- }
-
- return NULL;
-}
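
A worked example of the two lookup branches, using the c66 l2sram entry defined later in this file (the bus address and size are assumed purely for illustration):

/*
 * Assume l2sram: dev_addr = 0x800000, bus_addr = 0x4d80800000,
 * size = 0x100000 (values illustrative).
 *
 *   da = 0x00800100, len = 0x40:
 *     da < KEYSTONE_RPROC_LOCAL_ADDRESS_MASK, so the DSP-view branch
 *     matches: offset = 0x100, va = mem[i].cpu_addr + 0x100
 *
 *   da = 0x4d80800100, len = 0x40:
 *     the SoC-view branch resolves the same byte via bus_addr instead.
 */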
-
static const struct rproc_ops k3_dsp_rproc_ops = {
- .start = k3_dsp_rproc_start,
- .stop = k3_dsp_rproc_stop,
- .kick = k3_dsp_rproc_kick,
- .da_to_va = k3_dsp_rproc_da_to_va,
+ .start = k3_dsp_rproc_start,
+ .stop = k3_rproc_stop,
+ .attach = k3_rproc_attach,
+ .detach = k3_rproc_detach,
+ .kick = k3_rproc_kick,
+ .da_to_va = k3_rproc_da_to_va,
+ .get_loaded_rsc_table = k3_get_loaded_rsc_table,
};
-static int k3_dsp_rproc_of_get_memories(struct platform_device *pdev,
- struct k3_dsp_rproc *kproc)
-{
- const struct k3_dsp_dev_data *data = kproc->data;
- struct device *dev = &pdev->dev;
- struct resource *res;
- int num_mems = 0;
- int i;
-
- num_mems = kproc->data->num_mems;
- kproc->mem = devm_kcalloc(kproc->dev, num_mems,
- sizeof(*kproc->mem), GFP_KERNEL);
- if (!kproc->mem)
- return -ENOMEM;
-
- for (i = 0; i < num_mems; i++) {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- data->mems[i].name);
- if (!res) {
- dev_err(dev, "found no memory resource for %s\n",
- data->mems[i].name);
- return -EINVAL;
- }
- if (!devm_request_mem_region(dev, res->start,
- resource_size(res),
- dev_name(dev))) {
- dev_err(dev, "could not request %s region for resource\n",
- data->mems[i].name);
- return -EBUSY;
- }
-
- kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
- resource_size(res));
- if (!kproc->mem[i].cpu_addr) {
- dev_err(dev, "failed to map %s memory\n",
- data->mems[i].name);
- return -ENOMEM;
- }
- kproc->mem[i].bus_addr = res->start;
- kproc->mem[i].dev_addr = data->mems[i].dev_addr;
- kproc->mem[i].size = resource_size(res);
-
- dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
- data->mems[i].name, &kproc->mem[i].bus_addr,
- kproc->mem[i].size, kproc->mem[i].cpu_addr,
- kproc->mem[i].dev_addr);
- }
- kproc->num_mems = num_mems;
-
- return 0;
-}
-
-static void k3_dsp_mem_release(void *data)
-{
- struct device *dev = data;
-
- of_reserved_mem_device_release(dev);
-}
-
-static int k3_dsp_reserved_mem_init(struct k3_dsp_rproc *kproc)
-{
- struct device *dev = kproc->dev;
- struct device_node *np = dev->of_node;
- struct device_node *rmem_np;
- struct reserved_mem *rmem;
- int num_rmems;
- int ret, i;
-
- num_rmems = of_property_count_elems_of_size(np, "memory-region",
- sizeof(phandle));
- if (num_rmems < 0) {
- dev_err(dev, "device does not have reserved memory regions (%pe)\n",
- ERR_PTR(num_rmems));
- return -EINVAL;
- }
- if (num_rmems < 2) {
- dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
- num_rmems);
- return -EINVAL;
- }
-
- /* use reserved memory region 0 for vring DMA allocations */
- ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
- if (ret) {
- dev_err(dev, "device cannot initialize DMA pool (%pe)\n",
- ERR_PTR(ret));
- return ret;
- }
- ret = devm_add_action_or_reset(dev, k3_dsp_mem_release, dev);
- if (ret)
- return ret;
-
- num_rmems--;
- kproc->rmem = devm_kcalloc(dev, num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
- if (!kproc->rmem)
- return -ENOMEM;
-
- /* use remaining reserved memory regions for static carveouts */
- for (i = 0; i < num_rmems; i++) {
- rmem_np = of_parse_phandle(np, "memory-region", i + 1);
- if (!rmem_np)
- return -EINVAL;
-
- rmem = of_reserved_mem_lookup(rmem_np);
- of_node_put(rmem_np);
- if (!rmem)
- return -EINVAL;
-
- kproc->rmem[i].bus_addr = rmem->base;
- /* 64-bit address regions currently not supported */
- kproc->rmem[i].dev_addr = (u32)rmem->base;
- kproc->rmem[i].size = rmem->size;
- kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
- if (!kproc->rmem[i].cpu_addr) {
- dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
- i + 1, &rmem->base, &rmem->size);
- return -ENOMEM;
- }
-
- dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
- i + 1, &kproc->rmem[i].bus_addr,
- kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
- kproc->rmem[i].dev_addr);
- }
- kproc->num_rmems = num_rmems;
-
- return 0;
-}
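
The memory-region contract enforced above corresponds to a device-tree layout along these lines (node and label names are hypothetical):

/*
 * Hypothetical DT fragment, for illustration only:
 *
 *   memory-region = <&c66_0_dma_memory_region>,   // idx 0: vring DMA pool
 *                   <&c66_0_memory_region>;       // idx 1+: carveouts
 *
 * Region 0 backs the rproc's DMA pool for vrings and virtio buffers;
 * region 1 holds the loaded firmware image (and, in IPC-only mode, the
 * resource table at its base, per the contract described earlier).
 */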
-
-static void k3_dsp_release_tsp(void *data)
-{
- struct ti_sci_proc *tsp = data;
-
- ti_sci_proc_release(tsp);
-}
-
static int k3_dsp_rproc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- const struct k3_dsp_dev_data *data;
- struct k3_dsp_rproc *kproc;
+ const struct k3_rproc_dev_data *data;
+ struct k3_rproc *kproc;
struct rproc *rproc;
const char *fw_name;
bool p_state = false;
@@ -635,15 +93,15 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
rproc->has_iommu = false;
rproc->recovery_disabled = true;
if (data->uses_lreset) {
- rproc->ops->prepare = k3_dsp_rproc_prepare;
- rproc->ops->unprepare = k3_dsp_rproc_unprepare;
+ rproc->ops->prepare = k3_rproc_prepare;
+ rproc->ops->unprepare = k3_rproc_unprepare;
}
kproc = rproc->priv;
kproc->rproc = rproc;
kproc->dev = dev;
kproc->data = data;
- ret = k3_dsp_rproc_request_mbox(rproc);
+ ret = k3_rproc_request_mbox(rproc);
if (ret)
return ret;
@@ -671,15 +129,15 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
dev_err_probe(dev, ret, "ti_sci_proc_request failed\n");
return ret;
}
- ret = devm_add_action_or_reset(dev, k3_dsp_release_tsp, kproc->tsp);
+ ret = devm_add_action_or_reset(dev, k3_release_tsp, kproc->tsp);
if (ret)
return ret;
- ret = k3_dsp_rproc_of_get_memories(pdev, kproc);
+ ret = k3_rproc_of_get_memories(pdev, kproc);
if (ret)
return ret;
- ret = k3_dsp_reserved_mem_init(kproc);
+ ret = k3_reserved_mem_init(kproc);
if (ret)
return dev_err_probe(dev, ret, "reserved memory init failed\n");
@@ -692,30 +150,8 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
if (p_state) {
dev_info(dev, "configured DSP for IPC-only mode\n");
rproc->state = RPROC_DETACHED;
- /* override rproc ops with only required IPC-only mode ops */
- rproc->ops->prepare = NULL;
- rproc->ops->unprepare = NULL;
- rproc->ops->start = NULL;
- rproc->ops->stop = NULL;
- rproc->ops->attach = k3_dsp_rproc_attach;
- rproc->ops->detach = k3_dsp_rproc_detach;
- rproc->ops->get_loaded_rsc_table = k3_dsp_get_loaded_rsc_table;
} else {
dev_info(dev, "configured DSP for remoteproc mode\n");
- /*
- * ensure the DSP local reset is asserted to ensure the DSP
- * doesn't execute bogus code in .prepare() when the module
- * reset is released.
- */
- if (data->uses_lreset) {
- ret = reset_control_status(kproc->reset);
- if (ret < 0) {
- return dev_err_probe(dev, ret, "failed to get reset status\n");
- } else if (ret == 0) {
- dev_warn(dev, "local reset is deasserted for device\n");
- k3_dsp_rproc_reset(kproc);
- }
- }
}
ret = devm_rproc_add(dev, rproc);
@@ -729,7 +165,7 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
static void k3_dsp_rproc_remove(struct platform_device *pdev)
{
- struct k3_dsp_rproc *kproc = platform_get_drvdata(pdev);
+ struct k3_rproc *kproc = platform_get_drvdata(pdev);
struct rproc *rproc = kproc->rproc;
struct device *dev = &pdev->dev;
int ret;
@@ -743,37 +179,37 @@ static void k3_dsp_rproc_remove(struct platform_device *pdev)
mbox_free_channel(kproc->mbox);
}
-static const struct k3_dsp_mem_data c66_mems[] = {
+static const struct k3_rproc_mem_data c66_mems[] = {
{ .name = "l2sram", .dev_addr = 0x800000 },
{ .name = "l1pram", .dev_addr = 0xe00000 },
{ .name = "l1dram", .dev_addr = 0xf00000 },
};
 /* C71x cores only have an L1P cache; there are no L1P SRAMs */
-static const struct k3_dsp_mem_data c71_mems[] = {
+static const struct k3_rproc_mem_data c71_mems[] = {
{ .name = "l2sram", .dev_addr = 0x800000 },
{ .name = "l1dram", .dev_addr = 0xe00000 },
};
-static const struct k3_dsp_mem_data c7xv_mems[] = {
+static const struct k3_rproc_mem_data c7xv_mems[] = {
{ .name = "l2sram", .dev_addr = 0x800000 },
};
-static const struct k3_dsp_dev_data c66_data = {
+static const struct k3_rproc_dev_data c66_data = {
.mems = c66_mems,
.num_mems = ARRAY_SIZE(c66_mems),
.boot_align_addr = SZ_1K,
.uses_lreset = true,
};
-static const struct k3_dsp_dev_data c71_data = {
+static const struct k3_rproc_dev_data c71_data = {
.mems = c71_mems,
.num_mems = ARRAY_SIZE(c71_mems),
.boot_align_addr = SZ_2M,
.uses_lreset = false,
};
-static const struct k3_dsp_dev_data c7xv_data = {
+static const struct k3_rproc_dev_data c7xv_data = {
.mems = c7xv_mems,
.num_mems = ARRAY_SIZE(c7xv_mems),
.boot_align_addr = SZ_2M,
diff --git a/drivers/remoteproc/ti_k3_m4_remoteproc.c b/drivers/remoteproc/ti_k3_m4_remoteproc.c
index a16fb165fced..3a11fd24eb52 100644
--- a/drivers/remoteproc/ti_k3_m4_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_m4_remoteproc.c
@@ -19,552 +19,35 @@
#include "omap_remoteproc.h"
#include "remoteproc_internal.h"
#include "ti_sci_proc.h"
-
-#define K3_M4_IRAM_DEV_ADDR 0x00000
-#define K3_M4_DRAM_DEV_ADDR 0x30000
-
-/**
- * struct k3_m4_rproc_mem - internal memory structure
- * @cpu_addr: MPU virtual address of the memory region
- * @bus_addr: Bus address used to access the memory region
- * @dev_addr: Device address of the memory region from remote processor view
- * @size: Size of the memory region
- */
-struct k3_m4_rproc_mem {
- void __iomem *cpu_addr;
- phys_addr_t bus_addr;
- u32 dev_addr;
- size_t size;
-};
-
-/**
- * struct k3_m4_rproc_mem_data - memory definitions for a remote processor
- * @name: name for this memory entry
- * @dev_addr: device address for the memory entry
- */
-struct k3_m4_rproc_mem_data {
- const char *name;
- const u32 dev_addr;
-};
-
-/**
- * struct k3_m4_rproc - k3 remote processor driver structure
- * @dev: cached device pointer
- * @mem: internal memory regions data
- * @num_mems: number of internal memory regions
- * @rmem: reserved memory regions data
- * @num_rmems: number of reserved memory regions
- * @reset: reset control handle
- * @tsp: TI-SCI processor control handle
- * @ti_sci: TI-SCI handle
- * @ti_sci_id: TI-SCI device identifier
- * @mbox: mailbox channel handle
- * @client: mailbox client to request the mailbox channel
- */
-struct k3_m4_rproc {
- struct device *dev;
- struct k3_m4_rproc_mem *mem;
- int num_mems;
- struct k3_m4_rproc_mem *rmem;
- int num_rmems;
- struct reset_control *reset;
- struct ti_sci_proc *tsp;
- const struct ti_sci_handle *ti_sci;
- u32 ti_sci_id;
- struct mbox_chan *mbox;
- struct mbox_client client;
-};
-
-/**
- * k3_m4_rproc_mbox_callback() - inbound mailbox message handler
- * @client: mailbox client pointer used for requesting the mailbox channel
- * @data: mailbox payload
- *
- * This handler is invoked by the K3 mailbox driver whenever a mailbox
- * message is received. Usually, the mailbox payload simply contains
- * the index of the virtqueue that is kicked by the remote processor,
- * and we let remoteproc core handle it.
- *
- * In addition to virtqueue indices, we also have some out-of-band values
- * that indicate different events. Those values are deliberately very
- * large so they don't coincide with virtqueue indices.
- */
-static void k3_m4_rproc_mbox_callback(struct mbox_client *client, void *data)
-{
- struct device *dev = client->dev;
- struct rproc *rproc = dev_get_drvdata(dev);
- u32 msg = (u32)(uintptr_t)(data);
-
- dev_dbg(dev, "mbox msg: 0x%x\n", msg);
-
- switch (msg) {
- case RP_MBOX_CRASH:
- /*
- * remoteproc detected an exception, but error recovery is not
- * supported. So, just log this for now
- */
- dev_err(dev, "K3 rproc %s crashed\n", rproc->name);
- break;
- case RP_MBOX_ECHO_REPLY:
- dev_info(dev, "received echo reply from %s\n", rproc->name);
- break;
- default:
- /* silently handle all other valid messages */
- if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
- return;
- if (msg > rproc->max_notifyid) {
- dev_dbg(dev, "dropping unknown message 0x%x", msg);
- return;
- }
- /* msg contains the index of the triggered vring */
- if (rproc_vq_interrupt(rproc, msg) == IRQ_NONE)
- dev_dbg(dev, "no message was found in vqid %d\n", msg);
- }
-}
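
For context, the out-of-band values come from omap_remoteproc.h; an abridged sketch of that enum follows (check the header for the exact constants, the point being that they sit far above any plausible virtqueue index):

/* Abridged; see omap_remoteproc.h for the authoritative definitions. */
enum omap_rp_mbox_messages {
	RP_MBOX_READY		= 0xFFFFFF00,
	RP_MBOX_PENDING_MSG	= 0xFFFFFF01,
	RP_MBOX_CRASH		= 0xFFFFFF02,
	RP_MBOX_ECHO_REQUEST	= 0xFFFFFF03,
	RP_MBOX_ECHO_REPLY	= 0xFFFFFF04,
	/* ... suspend/resume messages elided ... */
	RP_MBOX_END_MSG,	/* upper bound for the range check above */
};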
-
-/*
- * Kick the remote processor to notify about pending unprocessed messages.
- * The vqid usage is not used and is inconsequential, as the kick is performed
- * through a simulated GPIO (a bit in an IPC interrupt-triggering register),
- * the remote processor is expected to process both its Tx and Rx virtqueues.
- */
-static void k3_m4_rproc_kick(struct rproc *rproc, int vqid)
-{
- struct k3_m4_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
- u32 msg = (u32)vqid;
- int ret;
-
- /*
- * Send the index of the triggered virtqueue in the mailbox payload.
- * NOTE: msg is cast to uintptr_t to prevent compiler warnings when
- * void * is 64-bit. It is safely cast back to u32 in the mailbox driver.
- */
- ret = mbox_send_message(kproc->mbox, (void *)(uintptr_t)msg);
- if (ret < 0)
- dev_err(dev, "failed to send mailbox message, status = %d\n",
- ret);
-}
-
-static int k3_m4_rproc_ping_mbox(struct k3_m4_rproc *kproc)
-{
- struct device *dev = kproc->dev;
- int ret;
-
- /*
- * Ping the remote processor. This is only for sanity's sake for now;
- * there is no functional effect whatsoever.
- *
- * Note that the reply will _not_ arrive immediately: this message
- * will wait in the mailbox fifo until the remote processor is booted.
- */
- ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
- if (ret < 0) {
- dev_err(dev, "mbox_send_message failed: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-/*
- * The M4 cores have a local reset that affects only the CPU, and a
- * generic module reset that powers on the device and allows the internal
- * memories to be accessed while the local reset is asserted. This function is
- * used to release the global reset on remote cores to allow loading into the
- * internal RAMs. The .prepare() ops is invoked by remoteproc core before any
- * firmware loading, and is followed by the .start() ops after loading to
- * actually let the remote cores to run.
- */
-static int k3_m4_rproc_prepare(struct rproc *rproc)
-{
- struct k3_m4_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
- int ret;
-
- /* If the core is running already no need to deassert the module reset */
- if (rproc->state == RPROC_DETACHED)
- return 0;
-
- /*
- * Ensure the local reset is asserted so the core doesn't
- * execute bogus code when the module reset is released.
- */
- ret = reset_control_assert(kproc->reset);
- if (ret) {
- dev_err(dev, "could not assert local reset\n");
- return ret;
- }
-
- ret = reset_control_status(kproc->reset);
- if (ret <= 0) {
- dev_err(dev, "local reset still not asserted\n");
- return ret;
- }
-
- ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
- kproc->ti_sci_id);
- if (ret) {
- dev_err(dev, "could not deassert module-reset for internal RAM loading\n");
- return ret;
- }
-
- return 0;
-}
-
-/*
- * This function implements the .unprepare() ops and performs the complementary
- * operations to that of the .prepare() ops. The function is used to assert the
- * global reset on applicable cores. This completes the second portion of
- * powering down the remote core. The cores themselves are only halted in the
- * .stop() callback through the local reset, and the .unprepare() ops is invoked
- * by the remoteproc core after the remoteproc is stopped to balance the global
- * reset.
- */
-static int k3_m4_rproc_unprepare(struct rproc *rproc)
-{
- struct k3_m4_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
- int ret;
-
- /* If the core is going to be detached do not assert the module reset */
- if (rproc->state == RPROC_ATTACHED)
- return 0;
-
- ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
- kproc->ti_sci_id);
- if (ret) {
- dev_err(dev, "module-reset assert failed\n");
- return ret;
- }
-
- return 0;
-}
-
-/*
- * This function implements the .get_loaded_rsc_table() callback and is used
- * to provide the resource table for a booted remote processor in IPC-only
- * mode. The remote processor firmwares follow a design-by-contract approach
- * and are expected to have the resource table at the base of the DDR region
- * reserved for firmware usage. This provides flexibility for the remote
- * processor to be booted by different bootloaders that may or may not have the
- * ability to publish the resource table address and size through a DT
- * property.
- */
-static struct resource_table *k3_m4_get_loaded_rsc_table(struct rproc *rproc,
- size_t *rsc_table_sz)
-{
- struct k3_m4_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
-
- if (!kproc->rmem[0].cpu_addr) {
- dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found");
- return ERR_PTR(-ENOMEM);
- }
-
- /*
- * NOTE: The resource table size is currently hard-coded to a maximum
- * of 256 bytes. The most common resource table usage for K3 firmwares
- * is to only have the vdev resource entry and an optional trace entry.
- * The exact size could be computed based on resource table address, but
- * the hard-coded value suffices to support the IPC-only mode.
- */
- *rsc_table_sz = 256;
- return (__force struct resource_table *)kproc->rmem[0].cpu_addr;
-}
-
-/*
- * Custom function to translate a remote processor device address (internal
- * RAMs only) to a kernel virtual address. The remote processors can access
- * their RAMs at either an internal address visible only from a remote
- * processor, or at the SoC-level bus address. Both these addresses need to be
- * looked through for translation. The translated addresses can be used either
- * by the remoteproc core for loading (when using kernel remoteproc loader), or
- * by any rpmsg bus drivers.
- */
-static void *k3_m4_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
-{
- struct k3_m4_rproc *kproc = rproc->priv;
- void __iomem *va = NULL;
- phys_addr_t bus_addr;
- u32 dev_addr, offset;
- size_t size;
- int i;
-
- if (len == 0)
- return NULL;
-
- for (i = 0; i < kproc->num_mems; i++) {
- bus_addr = kproc->mem[i].bus_addr;
- dev_addr = kproc->mem[i].dev_addr;
- size = kproc->mem[i].size;
-
- /* handle M4-view addresses */
- if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
- offset = da - dev_addr;
- va = kproc->mem[i].cpu_addr + offset;
- return (__force void *)va;
- }
-
- /* handle SoC-view addresses */
- if (da >= bus_addr && ((da + len) <= (bus_addr + size))) {
- offset = da - bus_addr;
- va = kproc->mem[i].cpu_addr + offset;
- return (__force void *)va;
- }
- }
-
- /* handle static DDR reserved memory regions */
- for (i = 0; i < kproc->num_rmems; i++) {
- dev_addr = kproc->rmem[i].dev_addr;
- size = kproc->rmem[i].size;
-
- if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
- offset = da - dev_addr;
- va = kproc->rmem[i].cpu_addr + offset;
- return (__force void *)va;
- }
- }
-
- return NULL;
-}
-
-static int k3_m4_rproc_of_get_memories(struct platform_device *pdev,
- struct k3_m4_rproc *kproc)
-{
- static const char * const mem_names[] = { "iram", "dram" };
- static const u32 mem_addrs[] = { K3_M4_IRAM_DEV_ADDR, K3_M4_DRAM_DEV_ADDR };
- struct device *dev = &pdev->dev;
- struct resource *res;
- int num_mems;
- int i;
-
- num_mems = ARRAY_SIZE(mem_names);
- kproc->mem = devm_kcalloc(kproc->dev, num_mems,
- sizeof(*kproc->mem), GFP_KERNEL);
- if (!kproc->mem)
- return -ENOMEM;
-
- for (i = 0; i < num_mems; i++) {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- mem_names[i]);
- if (!res) {
- dev_err(dev, "found no memory resource for %s\n",
- mem_names[i]);
- return -EINVAL;
- }
- if (!devm_request_mem_region(dev, res->start,
- resource_size(res),
- dev_name(dev))) {
- dev_err(dev, "could not request %s region for resource\n",
- mem_names[i]);
- return -EBUSY;
- }
-
- kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
- resource_size(res));
- if (!kproc->mem[i].cpu_addr) {
- dev_err(dev, "failed to map %s memory\n",
- mem_names[i]);
- return -ENOMEM;
- }
- kproc->mem[i].bus_addr = res->start;
- kproc->mem[i].dev_addr = mem_addrs[i];
- kproc->mem[i].size = resource_size(res);
-
- dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
- mem_names[i], &kproc->mem[i].bus_addr,
- kproc->mem[i].size, kproc->mem[i].cpu_addr,
- kproc->mem[i].dev_addr);
- }
- kproc->num_mems = num_mems;
-
- return 0;
-}
-
-static void k3_m4_rproc_dev_mem_release(void *data)
-{
- struct device *dev = data;
-
- of_reserved_mem_device_release(dev);
-}
-
-static int k3_m4_reserved_mem_init(struct k3_m4_rproc *kproc)
-{
- struct device *dev = kproc->dev;
- struct device_node *np = dev->of_node;
- struct device_node *rmem_np;
- struct reserved_mem *rmem;
- int num_rmems;
- int ret, i;
-
- num_rmems = of_property_count_elems_of_size(np, "memory-region",
- sizeof(phandle));
- if (num_rmems < 0) {
- dev_err(dev, "device does not have reserved memory regions (%d)\n",
- num_rmems);
- return -EINVAL;
- }
- if (num_rmems < 2) {
- dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
- num_rmems);
- return -EINVAL;
- }
-
- /* use reserved memory region 0 for vring DMA allocations */
- ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
- if (ret) {
- dev_err(dev, "device cannot initialize DMA pool (%d)\n", ret);
- return ret;
- }
- ret = devm_add_action_or_reset(dev, k3_m4_rproc_dev_mem_release, dev);
- if (ret)
- return ret;
-
- num_rmems--;
- kproc->rmem = devm_kcalloc(dev, num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
- if (!kproc->rmem)
- return -ENOMEM;
-
- /* use remaining reserved memory regions for static carveouts */
- for (i = 0; i < num_rmems; i++) {
- rmem_np = of_parse_phandle(np, "memory-region", i + 1);
- if (!rmem_np)
- return -EINVAL;
-
- rmem = of_reserved_mem_lookup(rmem_np);
- of_node_put(rmem_np);
- if (!rmem)
- return -EINVAL;
-
- kproc->rmem[i].bus_addr = rmem->base;
- /* 64-bit address regions currently not supported */
- kproc->rmem[i].dev_addr = (u32)rmem->base;
- kproc->rmem[i].size = rmem->size;
- kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
- if (!kproc->rmem[i].cpu_addr) {
- dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
- i + 1, &rmem->base, &rmem->size);
- return -ENOMEM;
- }
-
- dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
- i + 1, &kproc->rmem[i].bus_addr,
- kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
- kproc->rmem[i].dev_addr);
- }
- kproc->num_rmems = num_rmems;
-
- return 0;
-}
-
-static void k3_m4_release_tsp(void *data)
-{
- struct ti_sci_proc *tsp = data;
-
- ti_sci_proc_release(tsp);
-}
-
-/*
- * Power up the M4 remote processor.
- *
- * This function will be invoked only after the firmware for this rproc
- * was loaded, parsed successfully, and all of its resource requirements
- * were met. This callback is invoked only in remoteproc mode.
- */
-static int k3_m4_rproc_start(struct rproc *rproc)
-{
- struct k3_m4_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
- int ret;
-
- ret = k3_m4_rproc_ping_mbox(kproc);
- if (ret)
- return ret;
-
- ret = reset_control_deassert(kproc->reset);
- if (ret) {
- dev_err(dev, "local-reset deassert failed, ret = %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-/*
- * Stop the M4 remote processor.
- *
- * This function puts the M4 processor into reset, and finishes processing
- * of any pending messages. This callback is invoked only in remoteproc mode.
- */
-static int k3_m4_rproc_stop(struct rproc *rproc)
-{
- struct k3_m4_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
- int ret;
-
- ret = reset_control_assert(kproc->reset);
- if (ret) {
- dev_err(dev, "local-reset assert failed, ret = %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-/*
- * Attach to a running M4 remote processor (IPC-only mode)
- *
- * The remote processor is already booted, so there is no need to issue any
- * TI-SCI commands to boot the M4 core. This callback is used only in IPC-only
- * mode.
- */
-static int k3_m4_rproc_attach(struct rproc *rproc)
-{
- struct k3_m4_rproc *kproc = rproc->priv;
- int ret;
-
- ret = k3_m4_rproc_ping_mbox(kproc);
- if (ret)
- return ret;
-
- return 0;
-}
-
-/*
- * Detach from a running M4 remote processor (IPC-only mode)
- *
- * This rproc detach callback performs the opposite operation to attach
- * callback, the M4 core is not stopped and will be left to continue to
- * run its booted firmware. This callback is invoked only in IPC-only mode.
- */
-static int k3_m4_rproc_detach(struct rproc *rproc)
-{
- return 0;
-}
+#include "ti_k3_common.h"
static const struct rproc_ops k3_m4_rproc_ops = {
- .prepare = k3_m4_rproc_prepare,
- .unprepare = k3_m4_rproc_unprepare,
- .start = k3_m4_rproc_start,
- .stop = k3_m4_rproc_stop,
- .attach = k3_m4_rproc_attach,
- .detach = k3_m4_rproc_detach,
- .kick = k3_m4_rproc_kick,
- .da_to_va = k3_m4_rproc_da_to_va,
- .get_loaded_rsc_table = k3_m4_get_loaded_rsc_table,
+ .prepare = k3_rproc_prepare,
+ .unprepare = k3_rproc_unprepare,
+ .start = k3_rproc_start,
+ .stop = k3_rproc_stop,
+ .attach = k3_rproc_attach,
+ .detach = k3_rproc_detach,
+ .kick = k3_rproc_kick,
+ .da_to_va = k3_rproc_da_to_va,
+ .get_loaded_rsc_table = k3_get_loaded_rsc_table,
};
static int k3_m4_rproc_probe(struct platform_device *pdev)
{
+ const struct k3_rproc_dev_data *data;
struct device *dev = &pdev->dev;
- struct k3_m4_rproc *kproc;
+ struct k3_rproc *kproc;
struct rproc *rproc;
const char *fw_name;
bool r_state = false;
bool p_state = false;
int ret;
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -ENODEV;
+
ret = rproc_of_parse_firmware(dev, 0, &fw_name);
if (ret)
return dev_err_probe(dev, ret, "failed to parse firmware-name property\n");
@@ -578,6 +61,8 @@ static int k3_m4_rproc_probe(struct platform_device *pdev)
rproc->recovery_disabled = true;
kproc = rproc->priv;
kproc->dev = dev;
+ kproc->rproc = rproc;
+ kproc->data = data;
platform_set_drvdata(pdev, rproc);
kproc->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
@@ -601,15 +86,15 @@ static int k3_m4_rproc_probe(struct platform_device *pdev)
ret = ti_sci_proc_request(kproc->tsp);
if (ret < 0)
return dev_err_probe(dev, ret, "ti_sci_proc_request failed\n");
- ret = devm_add_action_or_reset(dev, k3_m4_release_tsp, kproc->tsp);
+ ret = devm_add_action_or_reset(dev, k3_release_tsp, kproc->tsp);
if (ret)
return ret;
- ret = k3_m4_rproc_of_get_memories(pdev, kproc);
+ ret = k3_rproc_of_get_memories(pdev, kproc);
if (ret)
return ret;
- ret = k3_m4_reserved_mem_init(kproc);
+ ret = k3_reserved_mem_init(kproc);
if (ret)
return dev_err_probe(dev, ret, "reserved memory init failed\n");
@@ -627,15 +112,9 @@ static int k3_m4_rproc_probe(struct platform_device *pdev)
dev_info(dev, "configured M4F for remoteproc mode\n");
}
- kproc->client.dev = dev;
- kproc->client.tx_done = NULL;
- kproc->client.rx_callback = k3_m4_rproc_mbox_callback;
- kproc->client.tx_block = false;
- kproc->client.knows_txdone = false;
- kproc->mbox = mbox_request_channel(&kproc->client, 0);
- if (IS_ERR(kproc->mbox))
- return dev_err_probe(dev, PTR_ERR(kproc->mbox),
- "mbox_request_channel failed\n");
+ ret = k3_rproc_request_mbox(rproc);
+ if (ret)
+ return ret;
ret = devm_rproc_add(dev, rproc);
if (ret)
@@ -645,8 +124,20 @@ static int k3_m4_rproc_probe(struct platform_device *pdev)
return 0;
}
+static const struct k3_rproc_mem_data am64_m4_mems[] = {
+ { .name = "iram", .dev_addr = 0x0 },
+ { .name = "dram", .dev_addr = 0x30000 },
+};
+
+static const struct k3_rproc_dev_data am64_m4_data = {
+ .mems = am64_m4_mems,
+ .num_mems = ARRAY_SIZE(am64_m4_mems),
+ .boot_align_addr = SZ_1K,
+ .uses_lreset = true,
+};
+
static const struct of_device_id k3_m4_of_match[] = {
- { .compatible = "ti,am64-m4fss", },
+ { .compatible = "ti,am64-m4fss", .data = &am64_m4_data, },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, k3_m4_of_match);
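
With per-device match data in place, supporting a further M4F integration would in principle need only a new data table plus a compatible entry. A hypothetical sketch follows; the am62 names and values are invented for illustration and are not part of this patch:

/* Hypothetical example only. */
static const struct k3_rproc_mem_data am62_m4_mems[] = {
	{ .name = "iram", .dev_addr = 0x0 },
	{ .name = "dram", .dev_addr = 0x30000 },
};

static const struct k3_rproc_dev_data am62_m4_data = {
	.mems = am62_m4_mems,
	.num_mems = ARRAY_SIZE(am62_m4_mems),
	.boot_align_addr = SZ_1K,
	.uses_lreset = true,
};

/* ... and in k3_m4_of_match: { .compatible = "ti,am62-m4fss", .data = &am62_m4_data }, */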
diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
index dbc513c5569c..e34c04c135fc 100644
--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
@@ -26,6 +26,7 @@
#include "omap_remoteproc.h"
#include "remoteproc_internal.h"
#include "ti_sci_proc.h"
+#include "ti_k3_common.h"
/* This address can either be for ATCM or BTCM with the other at address 0x0 */
#define K3_R5_TCM_DEV_ADDR 0x41010000
@@ -55,20 +56,6 @@
/* Applicable to only AM64x SoCs */
#define PROC_BOOT_STATUS_FLAG_R5_SINGLECORE_ONLY 0x00000200
-/**
- * struct k3_r5_mem - internal memory structure
- * @cpu_addr: MPU virtual address of the memory region
- * @bus_addr: Bus address used to access the memory region
- * @dev_addr: Device address from remoteproc view
- * @size: Size of the memory region
- */
-struct k3_r5_mem {
- void __iomem *cpu_addr;
- phys_addr_t bus_addr;
- u32 dev_addr;
- size_t size;
-};
-
/*
* All cluster mode values are not applicable on all SoCs. The following
* are the modes supported on various SoCs:
@@ -90,12 +77,14 @@ enum cluster_mode {
* @tcm_ecc_autoinit: flag to denote the auto-initialization of TCMs for ECC
* @single_cpu_mode: flag to denote if SoC/IP supports Single-CPU mode
* @is_single_core: flag to denote if SoC/IP has only single core R5
+ * @core_data: pointer to R5-core-specific device data
*/
struct k3_r5_soc_data {
bool tcm_is_double;
bool tcm_ecc_autoinit;
bool single_cpu_mode;
bool is_single_core;
+ const struct k3_rproc_dev_data *core_data;
};
/**
@@ -118,15 +107,10 @@ struct k3_r5_cluster {
* struct k3_r5_core - K3 R5 core structure
* @elem: linked list item
* @dev: cached device pointer
- * @rproc: rproc handle representing this core
- * @mem: internal memory regions data
+ * @kproc: K3 rproc handle representing this core
+ * @cluster: cached pointer to parent cluster structure
* @sram: on-chip SRAM memory regions data
- * @num_mems: number of internal memory regions
* @num_sram: number of on-chip SRAM memory regions
- * @reset: reset control handle
- * @tsp: TI-SCI processor control handle
- * @ti_sci: TI-SCI handle
- * @ti_sci_id: TI-SCI device identifier
* @atcm_enable: flag to control ATCM enablement
* @btcm_enable: flag to control BTCM enablement
* @loczrama: flag to dictate which TCM is at device address 0x0
@@ -135,157 +119,58 @@ struct k3_r5_cluster {
struct k3_r5_core {
struct list_head elem;
struct device *dev;
- struct rproc *rproc;
- struct k3_r5_mem *mem;
- struct k3_r5_mem *sram;
- int num_mems;
+ struct k3_rproc *kproc;
+ struct k3_r5_cluster *cluster;
+ struct k3_rproc_mem *sram;
int num_sram;
- struct reset_control *reset;
- struct ti_sci_proc *tsp;
- const struct ti_sci_handle *ti_sci;
- u32 ti_sci_id;
u32 atcm_enable;
u32 btcm_enable;
u32 loczrama;
bool released_from_reset;
};
-/**
- * struct k3_r5_rproc - K3 remote processor state
- * @dev: cached device pointer
- * @cluster: cached pointer to parent cluster structure
- * @mbox: mailbox channel handle
- * @client: mailbox client to request the mailbox channel
- * @rproc: rproc handle
- * @core: cached pointer to r5 core structure being used
- * @rmem: reserved memory regions data
- * @num_rmems: number of reserved memory regions
- */
-struct k3_r5_rproc {
- struct device *dev;
- struct k3_r5_cluster *cluster;
- struct mbox_chan *mbox;
- struct mbox_client client;
- struct rproc *rproc;
- struct k3_r5_core *core;
- struct k3_r5_mem *rmem;
- int num_rmems;
-};
-
-/**
- * k3_r5_rproc_mbox_callback() - inbound mailbox message handler
- * @client: mailbox client pointer used for requesting the mailbox channel
- * @data: mailbox payload
- *
- * This handler is invoked by the OMAP mailbox driver whenever a mailbox
- * message is received. Usually, the mailbox payload simply contains
- * the index of the virtqueue that is kicked by the remote processor,
- * and we let remoteproc core handle it.
- *
- * In addition to virtqueue indices, we also have some out-of-band values
- * that indicate different events. Those values are deliberately very
- * large so they don't coincide with virtqueue indices.
- */
-static void k3_r5_rproc_mbox_callback(struct mbox_client *client, void *data)
-{
- struct k3_r5_rproc *kproc = container_of(client, struct k3_r5_rproc,
- client);
- struct device *dev = kproc->rproc->dev.parent;
- const char *name = kproc->rproc->name;
- u32 msg = omap_mbox_message(data);
-
- /* Do not forward message from a detached core */
- if (kproc->rproc->state == RPROC_DETACHED)
- return;
-
- dev_dbg(dev, "mbox msg: 0x%x\n", msg);
-
- switch (msg) {
- case RP_MBOX_CRASH:
- /*
- * remoteproc detected an exception, but error recovery is not
- * supported. So, just log this for now
- */
- dev_err(dev, "K3 R5F rproc %s crashed\n", name);
- break;
- case RP_MBOX_ECHO_REPLY:
- dev_info(dev, "received echo reply from %s\n", name);
- break;
- default:
- /* silently handle all other valid messages */
- if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
- return;
- if (msg > kproc->rproc->max_notifyid) {
- dev_dbg(dev, "dropping unknown message 0x%x", msg);
- return;
- }
- /* msg contains the index of the triggered vring */
- if (rproc_vq_interrupt(kproc->rproc, msg) == IRQ_NONE)
- dev_dbg(dev, "no message was found in vqid %d\n", msg);
- }
-}
-
-/* kick a virtqueue */
-static void k3_r5_rproc_kick(struct rproc *rproc, int vqid)
-{
- struct k3_r5_rproc *kproc = rproc->priv;
- struct device *dev = rproc->dev.parent;
- mbox_msg_t msg = (mbox_msg_t)vqid;
- int ret;
-
- /* Do not forward message to a detached core */
- if (kproc->rproc->state == RPROC_DETACHED)
- return;
-
- /* send the index of the triggered virtqueue in the mailbox payload */
- ret = mbox_send_message(kproc->mbox, (void *)msg);
- if (ret < 0)
- dev_err(dev, "failed to send mailbox message, status = %d\n",
- ret);
-}
-
-static int k3_r5_split_reset(struct k3_r5_core *core)
+static int k3_r5_split_reset(struct k3_rproc *kproc)
{
int ret;
- ret = reset_control_assert(core->reset);
+ ret = reset_control_assert(kproc->reset);
if (ret) {
- dev_err(core->dev, "local-reset assert failed, ret = %d\n",
+ dev_err(kproc->dev, "local-reset assert failed, ret = %d\n",
ret);
return ret;
}
- ret = core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
- core->ti_sci_id);
+ ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id);
if (ret) {
- dev_err(core->dev, "module-reset assert failed, ret = %d\n",
+ dev_err(kproc->dev, "module-reset assert failed, ret = %d\n",
ret);
- if (reset_control_deassert(core->reset))
- dev_warn(core->dev, "local-reset deassert back failed\n");
+ if (reset_control_deassert(kproc->reset))
+ dev_warn(kproc->dev, "local-reset deassert back failed\n");
}
return ret;
}
-static int k3_r5_split_release(struct k3_r5_core *core)
+static int k3_r5_split_release(struct k3_rproc *kproc)
{
int ret;
- ret = core->ti_sci->ops.dev_ops.get_device(core->ti_sci,
- core->ti_sci_id);
+ ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
+ kproc->ti_sci_id);
if (ret) {
- dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
+ dev_err(kproc->dev, "module-reset deassert failed, ret = %d\n",
ret);
return ret;
}
- ret = reset_control_deassert(core->reset);
+ ret = reset_control_deassert(kproc->reset);
if (ret) {
- dev_err(core->dev, "local-reset deassert failed, ret = %d\n",
+ dev_err(kproc->dev, "local-reset deassert failed, ret = %d\n",
ret);
- if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
- core->ti_sci_id))
- dev_warn(core->dev, "module-reset assert back failed\n");
+ if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id))
+ dev_warn(kproc->dev, "module-reset assert back failed\n");
}
return ret;
@@ -294,11 +179,12 @@ static int k3_r5_split_release(struct k3_r5_core *core)
static int k3_r5_lockstep_reset(struct k3_r5_cluster *cluster)
{
struct k3_r5_core *core;
+ struct k3_rproc *kproc;
int ret;
/* assert local reset on all applicable cores */
list_for_each_entry(core, &cluster->cores, elem) {
- ret = reset_control_assert(core->reset);
+ ret = reset_control_assert(core->kproc->reset);
if (ret) {
dev_err(core->dev, "local-reset assert failed, ret = %d\n",
ret);
@@ -309,8 +195,9 @@ static int k3_r5_lockstep_reset(struct k3_r5_cluster *cluster)
/* disable PSC modules on all applicable cores */
list_for_each_entry(core, &cluster->cores, elem) {
- ret = core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
- core->ti_sci_id);
+ kproc = core->kproc;
+ ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id);
if (ret) {
dev_err(core->dev, "module-reset assert failed, ret = %d\n",
ret);
@@ -322,14 +209,15 @@ static int k3_r5_lockstep_reset(struct k3_r5_cluster *cluster)
unroll_module_reset:
list_for_each_entry_continue_reverse(core, &cluster->cores, elem) {
- if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
- core->ti_sci_id))
+ kproc = core->kproc;
+ if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id))
dev_warn(core->dev, "module-reset assert back failed\n");
}
core = list_last_entry(&cluster->cores, struct k3_r5_core, elem);
unroll_local_reset:
list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
- if (reset_control_deassert(core->reset))
+ if (reset_control_deassert(core->kproc->reset))
dev_warn(core->dev, "local-reset deassert back failed\n");
}
@@ -339,12 +227,14 @@ unroll_local_reset:
static int k3_r5_lockstep_release(struct k3_r5_cluster *cluster)
{
struct k3_r5_core *core;
+ struct k3_rproc *kproc;
int ret;
/* enable PSC modules on all applicable cores */
list_for_each_entry_reverse(core, &cluster->cores, elem) {
- ret = core->ti_sci->ops.dev_ops.get_device(core->ti_sci,
- core->ti_sci_id);
+ kproc = core->kproc;
+ ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
+ kproc->ti_sci_id);
if (ret) {
dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
ret);
@@ -355,7 +245,7 @@ static int k3_r5_lockstep_release(struct k3_r5_cluster *cluster)
/* deassert local reset on all applicable cores */
list_for_each_entry_reverse(core, &cluster->cores, elem) {
- ret = reset_control_deassert(core->reset);
+ ret = reset_control_deassert(core->kproc->reset);
if (ret) {
 dev_err(core->dev, "local-reset deassert failed, ret = %d\n",
ret);
@@ -367,67 +257,33 @@ static int k3_r5_lockstep_release(struct k3_r5_cluster *cluster)
unroll_local_reset:
list_for_each_entry_continue(core, &cluster->cores, elem) {
- if (reset_control_assert(core->reset))
+ if (reset_control_assert(core->kproc->reset))
dev_warn(core->dev, "local-reset assert back failed\n");
}
core = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
unroll_module_reset:
list_for_each_entry_from(core, &cluster->cores, elem) {
- if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
- core->ti_sci_id))
+ kproc = core->kproc;
+ if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id))
dev_warn(core->dev, "module-reset assert back failed\n");
}
return ret;
}
-static inline int k3_r5_core_halt(struct k3_r5_core *core)
+static inline int k3_r5_core_halt(struct k3_rproc *kproc)
{
- return ti_sci_proc_set_control(core->tsp,
+ return ti_sci_proc_set_control(kproc->tsp,
PROC_BOOT_CTRL_FLAG_R5_CORE_HALT, 0);
}
-static inline int k3_r5_core_run(struct k3_r5_core *core)
+static inline int k3_r5_core_run(struct k3_rproc *kproc)
{
- return ti_sci_proc_set_control(core->tsp,
+ return ti_sci_proc_set_control(kproc->tsp,
0, PROC_BOOT_CTRL_FLAG_R5_CORE_HALT);
}
-static int k3_r5_rproc_request_mbox(struct rproc *rproc)
-{
- struct k3_r5_rproc *kproc = rproc->priv;
- struct mbox_client *client = &kproc->client;
- struct device *dev = kproc->dev;
- int ret;
-
- client->dev = dev;
- client->tx_done = NULL;
- client->rx_callback = k3_r5_rproc_mbox_callback;
- client->tx_block = false;
- client->knows_txdone = false;
-
- kproc->mbox = mbox_request_channel(client, 0);
- if (IS_ERR(kproc->mbox))
- return dev_err_probe(dev, PTR_ERR(kproc->mbox),
- "mbox_request_channel failed\n");
-
- /*
- * Ping the remote processor. This is only for sanity's sake for now;
- * there is no functional effect whatsoever.
- *
- * Note that the reply will _not_ arrive immediately: this message
- * will wait in the mailbox fifo until the remote processor is booted.
- */
- ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
- if (ret < 0) {
- dev_err(dev, "mbox_send_message failed: %d\n", ret);
- mbox_free_channel(kproc->mbox);
- return ret;
- }
-
- return 0;
-}
-
/*
* The R5F cores have controls for both a reset and a halt/run. The code
* execution from DDR requires the initial boot-strapping code to be run
@@ -446,16 +302,39 @@ static int k3_r5_rproc_request_mbox(struct rproc *rproc)
*/
static int k3_r5_rproc_prepare(struct rproc *rproc)
{
- struct k3_r5_rproc *kproc = rproc->priv;
- struct k3_r5_cluster *cluster = kproc->cluster;
- struct k3_r5_core *core = kproc->core;
+ struct k3_rproc *kproc = rproc->priv;
+ struct k3_r5_core *core = kproc->priv, *core0, *core1;
+ struct k3_r5_cluster *cluster = core->cluster;
struct device *dev = kproc->dev;
u32 ctrl = 0, cfg = 0, stat = 0;
u64 boot_vec = 0;
bool mem_init_dis;
int ret;
- ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl, &stat);
+ /*
+ * R5 cores must be powered on sequentially; core0 should be in a
+ * higher power state than core1 within a cluster. So, wait (with a
+ * 2-second timeout) for core0 to power up before proceeding to core1.
+ * This wait is necessary because rproc_auto_boot_callback() for core1
+ * can be invoked before core0's due to thread execution order.
+ *
+ * By placing the wait mechanism here in .prepare() ops, this condition
+ * is enforced for rproc boot requests from sysfs as well.
+ */
+ core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
+ core1 = list_last_entry(&cluster->cores, struct k3_r5_core, elem);
+ if (cluster->mode == CLUSTER_MODE_SPLIT && core == core1 &&
+ !core0->released_from_reset) {
+ ret = wait_event_interruptible_timeout(cluster->core_transition,
+ core0->released_from_reset,
+ msecs_to_jiffies(2000));
+ if (ret <= 0) {
+ dev_err(dev, "cannot power up core1 before core0\n");
+ return -EPERM;
+ }
+ }
+
+ ret = ti_sci_proc_get_status(kproc->tsp, &boot_vec, &cfg, &ctrl, &stat);
if (ret < 0)
return ret;
mem_init_dis = !!(cfg & PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS);
@@ -463,7 +342,7 @@ static int k3_r5_rproc_prepare(struct rproc *rproc)
/* Re-use LockStep-mode reset logic for Single-CPU mode */
ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
cluster->mode == CLUSTER_MODE_SINGLECPU) ?
- k3_r5_lockstep_release(cluster) : k3_r5_split_release(core);
+ k3_r5_lockstep_release(cluster) : k3_r5_split_release(kproc);
if (ret) {
dev_err(dev, "unable to enable cores for TCM loading, ret = %d\n",
ret);
@@ -471,6 +350,14 @@ static int k3_r5_rproc_prepare(struct rproc *rproc)
}
/*
+ * Notify all threads in the wait queue when core0 state has changed so
+ * that threads waiting for this condition can be executed.
+ */
+ core->released_from_reset = true;
+ if (core == core0)
+ wake_up_interruptible(&cluster->core_transition);
+
+ /*
* Newer IP revisions like on J7200 SoCs support h/w auto-initialization
* of TCMs, so there is no need to perform the s/w memzero. This bit is
* configurable through System Firmware, the default value does perform
@@ -487,10 +374,10 @@ static int k3_r5_rproc_prepare(struct rproc *rproc)
* can be effective on all TCM addresses.
*/
dev_dbg(dev, "zeroing out ATCM memory\n");
- memset_io(core->mem[0].cpu_addr, 0x00, core->mem[0].size);
+ memset_io(kproc->mem[0].cpu_addr, 0x00, kproc->mem[0].size);
dev_dbg(dev, "zeroing out BTCM memory\n");
- memset_io(core->mem[1].cpu_addr, 0x00, core->mem[1].size);
+ memset_io(kproc->mem[1].cpu_addr, 0x00, kproc->mem[1].size);
return 0;
}
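
Taken together with the wake-up in the release path, the new ordering logic plays out as follows in split mode (a worked sequence derived from the code above):

/*
 * Worked split-mode boot sequence:
 *
 *   rproc_boot(core1): .prepare() sees !core0->released_from_reset and
 *                      blocks on cluster->core_transition (up to 2 s)
 *   rproc_boot(core0): .prepare() releases core0, sets
 *                      released_from_reset, and wakes the wait queue
 *   core1 resumes:     proceeds with its own module-reset release
 *
 * Shutdown enforces the mirror ordering: core0's .unprepare() waits
 * until core1 has powered down (released_from_reset cleared).
 */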
@@ -513,19 +400,47 @@ static int k3_r5_rproc_prepare(struct rproc *rproc)
*/
static int k3_r5_rproc_unprepare(struct rproc *rproc)
{
- struct k3_r5_rproc *kproc = rproc->priv;
- struct k3_r5_cluster *cluster = kproc->cluster;
- struct k3_r5_core *core = kproc->core;
+ struct k3_rproc *kproc = rproc->priv;
+ struct k3_r5_core *core = kproc->priv, *core0, *core1;
+ struct k3_r5_cluster *cluster = core->cluster;
struct device *dev = kproc->dev;
int ret;
+ /*
+ * Ensure power-down of cores is sequential in split mode. Core1 must
+ * power down before Core0 to maintain the expected state. By placing
+ * the wait mechanism here in .unprepare() ops, this condition is
+ * enforced for rproc stop or shutdown requests from sysfs and device
+ * removal as well.
+ */
+ core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
+ core1 = list_last_entry(&cluster->cores, struct k3_r5_core, elem);
+ if (cluster->mode == CLUSTER_MODE_SPLIT && core == core0 &&
+ core1->released_from_reset) {
+ ret = wait_event_interruptible_timeout(cluster->core_transition,
+ !core1->released_from_reset,
+ msecs_to_jiffies(2000));
+ if (ret <= 0) {
+ dev_err(dev, "cannot power down core0 before core1\n");
+ return -EPERM;
+ }
+ }
+
/* Re-use LockStep-mode reset logic for Single-CPU mode */
ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
cluster->mode == CLUSTER_MODE_SINGLECPU) ?
- k3_r5_lockstep_reset(cluster) : k3_r5_split_reset(core);
+ k3_r5_lockstep_reset(cluster) : k3_r5_split_reset(kproc);
if (ret)
dev_err(dev, "unable to disable cores, ret = %d\n", ret);
+ /*
+ * Notify all threads in the wait queue when core1 state has changed so
+ * that threads waiting for this condition can be executed.
+ */
+ core->released_from_reset = false;
+ if (core == core1)
+ wake_up_interruptible(&cluster->core_transition);
+
return ret;
}
@@ -548,10 +463,10 @@ static int k3_r5_rproc_unprepare(struct rproc *rproc)
*/
static int k3_r5_rproc_start(struct rproc *rproc)
{
- struct k3_r5_rproc *kproc = rproc->priv;
- struct k3_r5_cluster *cluster = kproc->cluster;
+ struct k3_rproc *kproc = rproc->priv;
+ struct k3_r5_core *core = kproc->priv;
+ struct k3_r5_cluster *cluster = core->cluster;
struct device *dev = kproc->dev;
- struct k3_r5_core *core0, *core;
u32 boot_addr;
int ret;
@@ -560,41 +475,28 @@ static int k3_r5_rproc_start(struct rproc *rproc)
dev_dbg(dev, "booting R5F core using boot addr = 0x%x\n", boot_addr);
/* boot vector need not be programmed for Core1 in LockStep mode */
- core = kproc->core;
- ret = ti_sci_proc_set_config(core->tsp, boot_addr, 0, 0);
+ ret = ti_sci_proc_set_config(kproc->tsp, boot_addr, 0, 0);
if (ret)
return ret;
/* unhalt/run all applicable cores */
if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
list_for_each_entry_reverse(core, &cluster->cores, elem) {
- ret = k3_r5_core_run(core);
+ ret = k3_r5_core_run(core->kproc);
if (ret)
goto unroll_core_run;
}
} else {
- /* do not allow core 1 to start before core 0 */
- core0 = list_first_entry(&cluster->cores, struct k3_r5_core,
- elem);
- if (core != core0 && core0->rproc->state == RPROC_OFFLINE) {
- dev_err(dev, "%s: can not start core 1 before core 0\n",
- __func__);
- return -EPERM;
- }
-
- ret = k3_r5_core_run(core);
+ ret = k3_r5_core_run(core->kproc);
if (ret)
return ret;
-
- core->released_from_reset = true;
- wake_up_interruptible(&cluster->core_transition);
}
return 0;
unroll_core_run:
list_for_each_entry_continue(core, &cluster->cores, elem) {
- if (k3_r5_core_halt(core))
+ if (k3_r5_core_halt(core->kproc))
dev_warn(core->dev, "core halt back failed\n");
}
return ret;
@@ -626,33 +528,22 @@ unroll_core_run:
*/
static int k3_r5_rproc_stop(struct rproc *rproc)
{
- struct k3_r5_rproc *kproc = rproc->priv;
- struct k3_r5_cluster *cluster = kproc->cluster;
- struct device *dev = kproc->dev;
- struct k3_r5_core *core1, *core = kproc->core;
+ struct k3_rproc *kproc = rproc->priv;
+ struct k3_r5_core *core = kproc->priv;
+ struct k3_r5_cluster *cluster = core->cluster;
int ret;
/* halt all applicable cores */
if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
list_for_each_entry(core, &cluster->cores, elem) {
- ret = k3_r5_core_halt(core);
+ ret = k3_r5_core_halt(core->kproc);
if (ret) {
core = list_prev_entry(core, elem);
goto unroll_core_halt;
}
}
} else {
- /* do not allow core 0 to stop before core 1 */
- core1 = list_last_entry(&cluster->cores, struct k3_r5_core,
- elem);
- if (core != core1 && core1->rproc->state != RPROC_OFFLINE) {
- dev_err(dev, "%s: can not stop core 0 before core 1\n",
- __func__);
- ret = -EPERM;
- goto out;
- }
-
- ret = k3_r5_core_halt(core);
+ ret = k3_r5_core_halt(core->kproc);
if (ret)
goto out;
}
@@ -661,7 +552,7 @@ static int k3_r5_rproc_stop(struct rproc *rproc)
unroll_core_halt:
list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
- if (k3_r5_core_run(core))
+ if (k3_r5_core_run(core->kproc))
dev_warn(core->dev, "core run back failed\n");
}
out:
@@ -669,58 +560,6 @@ out:
}
/*
- * Attach to a running R5F remote processor (IPC-only mode)
- *
- * The R5F attach callback is a NOP. The remote processor is already booted, and
- * all required resources have been acquired during the probe routine, so there is
- * no need to issue any TI-SCI commands to boot the R5F cores in IPC-only mode.
- * This callback is invoked only in IPC-only mode and exists because
- * rproc_validate() checks for its existence.
- */
-static int k3_r5_rproc_attach(struct rproc *rproc) { return 0; }
-
-/*
- * Detach from a running R5F remote processor (IPC-only mode)
- *
- * The R5F detach callback is a NOP. The R5F cores are not stopped and will be
- * left in booted state in IPC-only mode. This callback is invoked only in
- * IPC-only mode and exists for sanity's sake.
- */
-static int k3_r5_rproc_detach(struct rproc *rproc) { return 0; }
-
-/*
- * This function implements the .get_loaded_rsc_table() callback and is used
- * to provide the resource table for the booted R5F in IPC-only mode. The K3 R5F
- * firmwares follow a design-by-contract approach and are expected to have the
- * resource table at the base of the DDR region reserved for firmware usage.
- * This provides flexibility for the remote processor to be booted by different
- * bootloaders that may or may not have the ability to publish the resource table
- * address and size through a DT property. This callback is invoked only in
- * IPC-only mode.
- */
-static struct resource_table *k3_r5_get_loaded_rsc_table(struct rproc *rproc,
- size_t *rsc_table_sz)
-{
- struct k3_r5_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
-
- if (!kproc->rmem[0].cpu_addr) {
- dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found");
- return ERR_PTR(-ENOMEM);
- }
-
- /*
- * NOTE: The resource table size is currently hard-coded to a maximum
- * of 256 bytes. The most common resource table usage for K3 firmwares
- * is to only have the vdev resource entry and an optional trace entry.
- * The exact size could be computed based on resource table address, but
- * the hard-coded value suffices to support the IPC-only mode.
- */
- *rsc_table_sz = 256;
- return (__force struct resource_table *)kproc->rmem[0].cpu_addr;
-}
-
-/*
* Internal Memory translation helper
*
* Custom function implementing the rproc .da_to_va ops to provide address
@@ -730,10 +569,9 @@ static struct resource_table *k3_r5_get_loaded_rsc_table(struct rproc *rproc,
*/
static void *k3_r5_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
- struct k3_r5_rproc *kproc = rproc->priv;
- struct k3_r5_core *core = kproc->core;
+ struct k3_rproc *kproc = rproc->priv;
+ struct k3_r5_core *core = kproc->priv;
void __iomem *va = NULL;
- phys_addr_t bus_addr;
u32 dev_addr, offset;
size_t size;
int i;
@@ -741,27 +579,6 @@ static void *k3_r5_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool
if (len == 0)
return NULL;
- /* handle both R5 and SoC views of ATCM and BTCM */
- for (i = 0; i < core->num_mems; i++) {
- bus_addr = core->mem[i].bus_addr;
- dev_addr = core->mem[i].dev_addr;
- size = core->mem[i].size;
-
- /* handle R5-view addresses of TCMs */
- if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
- offset = da - dev_addr;
- va = core->mem[i].cpu_addr + offset;
- return (__force void *)va;
- }
-
- /* handle SoC-view addresses of TCMs */
- if (da >= bus_addr && ((da + len) <= (bus_addr + size))) {
- offset = da - bus_addr;
- va = core->mem[i].cpu_addr + offset;
- return (__force void *)va;
- }
- }
-
/* handle any SRAM regions using SoC-view addresses */
for (i = 0; i < core->num_sram; i++) {
dev_addr = core->sram[i].dev_addr;
@@ -774,19 +591,8 @@ static void *k3_r5_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool
}
}
- /* handle static DDR reserved memory regions */
- for (i = 0; i < kproc->num_rmems; i++) {
- dev_addr = kproc->rmem[i].dev_addr;
- size = kproc->rmem[i].size;
-
- if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
- offset = da - dev_addr;
- va = kproc->rmem[i].cpu_addr + offset;
- return (__force void *)va;
- }
- }
-
- return NULL;
+ /* handle both TCM and DDR memory regions */
+ return k3_rproc_da_to_va(rproc, da, len, is_iomem);
}
static const struct rproc_ops k3_r5_rproc_ops = {
@@ -794,7 +600,7 @@ static const struct rproc_ops k3_r5_rproc_ops = {
.unprepare = k3_r5_rproc_unprepare,
.start = k3_r5_rproc_start,
.stop = k3_r5_rproc_stop,
- .kick = k3_r5_rproc_kick,
+ .kick = k3_rproc_kick,
.da_to_va = k3_r5_rproc_da_to_va,
};
@@ -833,11 +639,11 @@ static const struct rproc_ops k3_r5_rproc_ops = {
 * both the cores with the same settings, before reconfiguring again for
* LockStep mode.
*/
-static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc)
+static int k3_r5_rproc_configure(struct k3_rproc *kproc)
{
- struct k3_r5_cluster *cluster = kproc->cluster;
+ struct k3_r5_core *temp, *core0, *core = kproc->priv;
+ struct k3_r5_cluster *cluster = core->cluster;
struct device *dev = kproc->dev;
- struct k3_r5_core *core0, *core, *temp;
u32 ctrl = 0, cfg = 0, stat = 0;
u32 set_cfg = 0, clr_cfg = 0;
u64 boot_vec = 0;
@@ -851,10 +657,10 @@ static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc)
cluster->mode == CLUSTER_MODE_SINGLECORE) {
core = core0;
} else {
- core = kproc->core;
+ core = kproc->priv;
}
- ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl,
+ ret = ti_sci_proc_get_status(core->kproc->tsp, &boot_vec, &cfg, &ctrl,
&stat);
if (ret < 0)
return ret;
@@ -924,7 +730,7 @@ static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc)
* and TEINIT config is only allowed with Core0.
*/
list_for_each_entry(temp, &cluster->cores, elem) {
- ret = k3_r5_core_halt(temp);
+ ret = k3_r5_core_halt(temp->kproc);
if (ret)
goto out;
@@ -932,7 +738,7 @@ static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc)
clr_cfg &= ~PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
clr_cfg &= ~PROC_BOOT_CFG_FLAG_R5_TEINIT;
}
- ret = ti_sci_proc_set_config(temp->tsp, boot_vec,
+ ret = ti_sci_proc_set_config(temp->kproc->tsp, boot_vec,
set_cfg, clr_cfg);
if (ret)
goto out;
@@ -940,14 +746,14 @@ static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc)
set_cfg = PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
clr_cfg = 0;
- ret = ti_sci_proc_set_config(core->tsp, boot_vec,
+ ret = ti_sci_proc_set_config(core->kproc->tsp, boot_vec,
set_cfg, clr_cfg);
} else {
- ret = k3_r5_core_halt(core);
+ ret = k3_r5_core_halt(core->kproc);
if (ret)
goto out;
- ret = ti_sci_proc_set_config(core->tsp, boot_vec,
+ ret = ti_sci_proc_set_config(core->kproc->tsp, boot_vec,
set_cfg, clr_cfg);
}
@@ -955,93 +761,6 @@ out:
return ret;
}
-static void k3_r5_mem_release(void *data)
-{
- struct device *dev = data;
-
- of_reserved_mem_device_release(dev);
-}
-
-static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc)
-{
- struct device *dev = kproc->dev;
- struct device_node *np = dev_of_node(dev);
- struct device_node *rmem_np;
- struct reserved_mem *rmem;
- int num_rmems;
- int ret, i;
-
- num_rmems = of_property_count_elems_of_size(np, "memory-region",
- sizeof(phandle));
- if (num_rmems <= 0) {
- dev_err(dev, "device does not have reserved memory regions, ret = %d\n",
- num_rmems);
- return -EINVAL;
- }
- if (num_rmems < 2) {
- dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
- num_rmems);
- return -EINVAL;
- }
-
- /* use reserved memory region 0 for vring DMA allocations */
- ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
- if (ret) {
- dev_err(dev, "device cannot initialize DMA pool, ret = %d\n",
- ret);
- return ret;
- }
-
- ret = devm_add_action_or_reset(dev, k3_r5_mem_release, dev);
- if (ret)
- return ret;
-
- num_rmems--;
- kproc->rmem = devm_kcalloc(dev, num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
- if (!kproc->rmem)
- return -ENOMEM;
-
- /* use remaining reserved memory regions for static carveouts */
- for (i = 0; i < num_rmems; i++) {
- rmem_np = of_parse_phandle(np, "memory-region", i + 1);
- if (!rmem_np)
- return -EINVAL;
-
- rmem = of_reserved_mem_lookup(rmem_np);
- of_node_put(rmem_np);
- if (!rmem)
- return -EINVAL;
-
- kproc->rmem[i].bus_addr = rmem->base;
- /*
- * R5Fs do not have an MMU, but have a Region Address Translator
- * (RAT) module that provides a fixed entry translation between
- * the 32-bit processor addresses to 64-bit bus addresses. The
- * RAT is programmable only by the R5F cores. Support for RAT
- * is currently not supported, so 64-bit address regions are not
- * supported. The absence of MMUs implies that the R5F device
- * addresses/supported memory regions are restricted to 32-bit
- * bus addresses, and are identical
- */
- kproc->rmem[i].dev_addr = (u32)rmem->base;
- kproc->rmem[i].size = rmem->size;
- kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
- if (!kproc->rmem[i].cpu_addr) {
- dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
- i + 1, &rmem->base, &rmem->size);
- return -ENOMEM;
- }
-
- dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
- i + 1, &kproc->rmem[i].bus_addr,
- kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
- kproc->rmem[i].dev_addr);
- }
- kproc->num_rmems = num_rmems;
-
- return 0;
-}
-
/*
* Each R5F core within a typical R5FSS instance has a total of 64 KB of TCMs,
* split equally into two 32 KB banks between ATCM and BTCM. The TCMs from both
@@ -1055,12 +774,11 @@ static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc)
* supported SoCs. The Core0 TCM sizes therefore have to be adjusted to only
* half the original size in Split mode.
*/
-static void k3_r5_adjust_tcm_sizes(struct k3_r5_rproc *kproc)
+static void k3_r5_adjust_tcm_sizes(struct k3_rproc *kproc)
{
- struct k3_r5_cluster *cluster = kproc->cluster;
- struct k3_r5_core *core = kproc->core;
+ struct k3_r5_core *core0, *core = kproc->priv;
+ struct k3_r5_cluster *cluster = core->cluster;
struct device *cdev = core->dev;
- struct k3_r5_core *core0;
if (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
cluster->mode == CLUSTER_MODE_SINGLECPU ||
@@ -1070,14 +788,14 @@ static void k3_r5_adjust_tcm_sizes(struct k3_r5_rproc *kproc)
core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
if (core == core0) {
- WARN_ON(core->mem[0].size != SZ_64K);
- WARN_ON(core->mem[1].size != SZ_64K);
+ WARN_ON(kproc->mem[0].size != SZ_64K);
+ WARN_ON(kproc->mem[1].size != SZ_64K);
- core->mem[0].size /= 2;
- core->mem[1].size /= 2;
+ kproc->mem[0].size /= 2;
+ kproc->mem[1].size /= 2;
dev_dbg(cdev, "adjusted TCM sizes, ATCM = 0x%zx BTCM = 0x%zx\n",
- core->mem[0].size, core->mem[1].size);
+ kproc->mem[0].size, kproc->mem[1].size);
}
}
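
On SoCs where the TCMs are "double" in Split mode, core0's two 64 KB banks are shared with core1, so only half of each remains usable. A minimal numeric check of the adjustment above, assuming the kernel's SZ_64K (0x10000):

/* Split mode on a "double TCM" SoC: core0 keeps half of each bank. */
size_t atcm = SZ_64K, btcm = SZ_64K;	/* 0x10000 each, per the WARN_ON()s */

atcm /= 2;	/* 0x8000 = 32 KB of usable ATCM */
btcm /= 2;	/* 0x8000 = 32 KB of usable BTCM */
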
@@ -1094,24 +812,23 @@ static void k3_r5_adjust_tcm_sizes(struct k3_r5_rproc *kproc)
* actual values configured by bootloader. The driver internal device memory
* addresses for TCMs are also updated.
*/
-static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
+static int k3_r5_rproc_configure_mode(struct k3_rproc *kproc)
{
- struct k3_r5_cluster *cluster = kproc->cluster;
- struct k3_r5_core *core = kproc->core;
+ struct k3_r5_core *core0, *core = kproc->priv;
+ struct k3_r5_cluster *cluster = core->cluster;
struct device *cdev = core->dev;
bool r_state = false, c_state = false, lockstep_en = false, single_cpu = false;
u32 ctrl = 0, cfg = 0, stat = 0, halted = 0;
u64 boot_vec = 0;
u32 atcm_enable, btcm_enable, loczrama;
- struct k3_r5_core *core0;
enum cluster_mode mode = cluster->mode;
int reset_ctrl_status;
int ret;
core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
- ret = core->ti_sci->ops.dev_ops.is_on(core->ti_sci, core->ti_sci_id,
- &r_state, &c_state);
+ ret = kproc->ti_sci->ops.dev_ops.is_on(kproc->ti_sci, kproc->ti_sci_id,
+ &r_state, &c_state);
if (ret) {
dev_err(cdev, "failed to get initial state, mode cannot be determined, ret = %d\n",
ret);
@@ -1122,7 +839,7 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
r_state, c_state);
}
- reset_ctrl_status = reset_control_status(core->reset);
+ reset_ctrl_status = reset_control_status(kproc->reset);
if (reset_ctrl_status < 0) {
dev_err(cdev, "failed to get initial local reset status, ret = %d\n",
reset_ctrl_status);
@@ -1135,7 +852,7 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
*/
core->released_from_reset = c_state;
- ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl,
+ ret = ti_sci_proc_get_status(kproc->tsp, &boot_vec, &cfg, &ctrl,
&stat);
if (ret < 0) {
dev_err(cdev, "failed to get initial processor status, ret = %d\n",
@@ -1170,10 +887,10 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
kproc->rproc->ops->unprepare = NULL;
kproc->rproc->ops->start = NULL;
kproc->rproc->ops->stop = NULL;
- kproc->rproc->ops->attach = k3_r5_rproc_attach;
- kproc->rproc->ops->detach = k3_r5_rproc_detach;
+ kproc->rproc->ops->attach = k3_rproc_attach;
+ kproc->rproc->ops->detach = k3_rproc_detach;
kproc->rproc->ops->get_loaded_rsc_table =
- k3_r5_get_loaded_rsc_table;
+ k3_get_loaded_rsc_table;
} else if (!c_state) {
dev_info(cdev, "configured R5F for remoteproc mode\n");
ret = 0;
@@ -1192,19 +909,121 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
core->atcm_enable = atcm_enable;
core->btcm_enable = btcm_enable;
core->loczrama = loczrama;
- core->mem[0].dev_addr = loczrama ? 0 : K3_R5_TCM_DEV_ADDR;
- core->mem[1].dev_addr = loczrama ? K3_R5_TCM_DEV_ADDR : 0;
+ kproc->mem[0].dev_addr = loczrama ? 0 : K3_R5_TCM_DEV_ADDR;
+ kproc->mem[1].dev_addr = loczrama ? K3_R5_TCM_DEV_ADDR : 0;
}
return ret;
}
+static int k3_r5_core_of_get_internal_memories(struct platform_device *pdev,
+ struct k3_rproc *kproc)
+{
+ const struct k3_rproc_dev_data *data = kproc->data;
+ struct device *dev = &pdev->dev;
+ struct k3_r5_core *core = kproc->priv;
+ int num_mems;
+ int i, ret;
+
+ num_mems = data->num_mems;
+ kproc->mem = devm_kcalloc(kproc->dev, num_mems, sizeof(*kproc->mem),
+ GFP_KERNEL);
+ if (!kproc->mem)
+ return -ENOMEM;
+
+ ret = k3_rproc_of_get_memories(pdev, kproc);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_mems; i++) {
+ /*
+ * TODO:
+ * The R5F cores can place ATCM & BTCM anywhere in their address
+ * space based on the corresponding Region Registers in the System
+ * Control coprocessor. For now, place ATCM and BTCM at
+ * addresses 0 and 0x41010000 (same as the bus address on AM65x
+ * SoCs) based on loczrama setting overriding default assignment
+ * done by k3_rproc_of_get_memories().
+ */
+ if (!strcmp(data->mems[i].name, "atcm")) {
+ kproc->mem[i].dev_addr = core->loczrama ?
+ 0 : K3_R5_TCM_DEV_ADDR;
+ } else {
+ kproc->mem[i].dev_addr = core->loczrama ?
+ K3_R5_TCM_DEV_ADDR : 0;
+ }
+
+ dev_dbg(dev, "Updating bus addr %pa of memory %5s\n",
+ &kproc->mem[i].bus_addr, data->mems[i].name);
+ }
+
+ return 0;
+}
+
+static int k3_r5_core_of_get_sram_memories(struct platform_device *pdev,
+ struct k3_r5_core *core)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct device_node *sram_np;
+ struct resource res;
+ int num_sram;
+ int i, ret;
+
+ num_sram = of_property_count_elems_of_size(np, "sram", sizeof(phandle));
+ if (num_sram <= 0) {
+ dev_dbg(dev, "device does not use reserved on-chip memories, num_sram = %d\n",
+ num_sram);
+ return 0;
+ }
+
+ core->sram = devm_kcalloc(dev, num_sram, sizeof(*core->sram), GFP_KERNEL);
+ if (!core->sram)
+ return -ENOMEM;
+
+ for (i = 0; i < num_sram; i++) {
+ sram_np = of_parse_phandle(np, "sram", i);
+ if (!sram_np)
+ return -EINVAL;
+
+ if (!of_device_is_available(sram_np)) {
+ of_node_put(sram_np);
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(sram_np, 0, &res);
+ of_node_put(sram_np);
+ if (ret)
+ return -EINVAL;
+
+ core->sram[i].bus_addr = res.start;
+ core->sram[i].dev_addr = res.start;
+ core->sram[i].size = resource_size(&res);
+ core->sram[i].cpu_addr = devm_ioremap_wc(dev, res.start,
+ resource_size(&res));
+ if (!core->sram[i].cpu_addr) {
+ dev_err(dev, "failed to parse and map sram%d memory at %pad\n",
+ i, &res.start);
+ return -ENOMEM;
+ }
+
+ dev_dbg(dev, "memory sram%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ i, &core->sram[i].bus_addr,
+ core->sram[i].size, core->sram[i].cpu_addr,
+ core->sram[i].dev_addr);
+ }
+ core->num_sram = num_sram;
+
+ return 0;
+}
+
static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
{
struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
- struct k3_r5_rproc *kproc;
+ struct k3_rproc *kproc;
struct k3_r5_core *core, *core1;
+ struct device_node *np;
struct device *cdev;
const char *fw_name;
struct rproc *rproc;
@@ -1213,6 +1032,7 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
core1 = list_last_entry(&cluster->cores, struct k3_r5_core, elem);
list_for_each_entry(core, &cluster->cores, elem) {
cdev = core->dev;
+ np = dev_of_node(cdev);
ret = rproc_of_parse_firmware(cdev, 0, &fw_name);
if (ret) {
dev_err(dev, "failed to parse firmware-name property, ret = %d\n",
@@ -1233,13 +1053,66 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
rproc->recovery_disabled = true;
kproc = rproc->priv;
- kproc->cluster = cluster;
- kproc->core = core;
+ kproc->priv = core;
kproc->dev = cdev;
kproc->rproc = rproc;
- core->rproc = rproc;
+ kproc->data = cluster->soc_data->core_data;
+ core->kproc = kproc;
+
+ kproc->ti_sci = devm_ti_sci_get_by_phandle(cdev, "ti,sci");
+ if (IS_ERR(kproc->ti_sci)) {
+ ret = dev_err_probe(cdev, PTR_ERR(kproc->ti_sci),
+ "failed to get ti-sci handle\n");
+ kproc->ti_sci = NULL;
+ goto out;
+ }
+
+ ret = of_property_read_u32(np, "ti,sci-dev-id", &kproc->ti_sci_id);
+ if (ret) {
+ dev_err(cdev, "missing 'ti,sci-dev-id' property\n");
+ goto out;
+ }
- ret = k3_r5_rproc_request_mbox(rproc);
+ kproc->reset = devm_reset_control_get_exclusive(cdev, NULL);
+ if (IS_ERR_OR_NULL(kproc->reset)) {
+ ret = PTR_ERR_OR_ZERO(kproc->reset);
+ if (!ret)
+ ret = -ENODEV;
+ dev_err_probe(cdev, ret, "failed to get reset handle\n");
+ goto out;
+ }
+
+ kproc->tsp = ti_sci_proc_of_get_tsp(cdev, kproc->ti_sci);
+ if (IS_ERR(kproc->tsp)) {
+ ret = dev_err_probe(cdev, PTR_ERR(kproc->tsp),
+ "failed to construct ti-sci proc control\n");
+ goto out;
+ }
+
+ ret = k3_r5_core_of_get_internal_memories(to_platform_device(cdev), kproc);
+ if (ret) {
+ dev_err(cdev, "failed to get internal memories, ret = %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = ti_sci_proc_request(kproc->tsp);
+ if (ret < 0) {
+ dev_err(cdev, "ti_sci_proc_request failed, ret = %d\n", ret);
+ goto out;
+ }
+
+ ret = devm_add_action_or_reset(cdev, k3_release_tsp, kproc->tsp);
+ if (ret)
+ goto out;
+ }
+
+ list_for_each_entry(core, &cluster->cores, elem) {
+ cdev = core->dev;
+ kproc = core->kproc;
+ rproc = kproc->rproc;
+
+ ret = k3_rproc_request_mbox(rproc);
if (ret)
return ret;
@@ -1251,7 +1124,7 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
ret = k3_r5_rproc_configure(kproc);
if (ret) {
- dev_err(dev, "initial configure failed, ret = %d\n",
+ dev_err(cdev, "initial configure failed, ret = %d\n",
ret);
goto out;
}
@@ -1259,16 +1132,16 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
init_rmem:
k3_r5_adjust_tcm_sizes(kproc);
- ret = k3_r5_reserved_mem_init(kproc);
+ ret = k3_reserved_mem_init(kproc);
if (ret) {
- dev_err(dev, "reserved memory init failed, ret = %d\n",
+ dev_err(cdev, "reserved memory init failed, ret = %d\n",
ret);
goto out;
}
- ret = devm_rproc_add(dev, rproc);
+ ret = devm_rproc_add(cdev, rproc);
if (ret) {
- dev_err_probe(dev, ret, "rproc_add failed\n");
+ dev_err_probe(cdev, ret, "rproc_add failed\n");
goto out;
}
@@ -1279,26 +1152,6 @@ init_rmem:
cluster->mode == CLUSTER_MODE_SINGLECPU ||
cluster->mode == CLUSTER_MODE_SINGLECORE)
break;
-
- /*
- * R5 cores require to be powered on sequentially, core0
- * should be in higher power state than core1 in a cluster
- * So, wait for current core to power up before proceeding
- * to next core and put timeout of 2sec for each core.
- *
- * This waiting mechanism is necessary because
- * rproc_auto_boot_callback() for core1 can be called before
- * core0 due to thread execution order.
- */
- ret = wait_event_interruptible_timeout(cluster->core_transition,
- core->released_from_reset,
- msecs_to_jiffies(2000));
- if (ret <= 0) {
- dev_err(dev,
- "Timed out waiting for %s core to power up!\n",
- rproc->name);
- goto out;
- }
}
return 0;
@@ -1317,8 +1170,8 @@ out:
/* undo core0 upon any failures on core1 in split-mode */
if (cluster->mode == CLUSTER_MODE_SPLIT && core == core1) {
core = list_prev_entry(core, elem);
- rproc = core->rproc;
- kproc = rproc->priv;
+ kproc = core->kproc;
+ rproc = kproc->rproc;
goto err_split;
}
return ret;
@@ -1327,7 +1180,7 @@ out:
static void k3_r5_cluster_rproc_exit(void *data)
{
struct k3_r5_cluster *cluster = platform_get_drvdata(data);
- struct k3_r5_rproc *kproc;
+ struct k3_rproc *kproc;
struct k3_r5_core *core;
struct rproc *rproc;
int ret;
@@ -1343,8 +1196,8 @@ static void k3_r5_cluster_rproc_exit(void *data)
list_last_entry(&cluster->cores, struct k3_r5_core, elem);
list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
- rproc = core->rproc;
- kproc = rproc->priv;
+ kproc = core->kproc;
+ rproc = kproc->rproc;
if (rproc->state == RPROC_ATTACHED) {
ret = rproc_detach(rproc);
@@ -1358,142 +1211,6 @@ static void k3_r5_cluster_rproc_exit(void *data)
}
}
-static int k3_r5_core_of_get_internal_memories(struct platform_device *pdev,
- struct k3_r5_core *core)
-{
- static const char * const mem_names[] = {"atcm", "btcm"};
- struct device *dev = &pdev->dev;
- struct resource *res;
- int num_mems;
- int i;
-
- num_mems = ARRAY_SIZE(mem_names);
- core->mem = devm_kcalloc(dev, num_mems, sizeof(*core->mem), GFP_KERNEL);
- if (!core->mem)
- return -ENOMEM;
-
- for (i = 0; i < num_mems; i++) {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- mem_names[i]);
- if (!res) {
- dev_err(dev, "found no memory resource for %s\n",
- mem_names[i]);
- return -EINVAL;
- }
- if (!devm_request_mem_region(dev, res->start,
- resource_size(res),
- dev_name(dev))) {
- dev_err(dev, "could not request %s region for resource\n",
- mem_names[i]);
- return -EBUSY;
- }
-
- /*
- * TCMs are designed in general to support RAM-like backing
- * memories. So, map these as Normal Non-Cached memories. This
- * also avoids/fixes any potential alignment faults due to
- * unaligned data accesses when using memcpy() or memset()
- * functions (normally seen with device type memory).
- */
- core->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
- resource_size(res));
- if (!core->mem[i].cpu_addr) {
- dev_err(dev, "failed to map %s memory\n", mem_names[i]);
- return -ENOMEM;
- }
- core->mem[i].bus_addr = res->start;
-
- /*
- * TODO:
- * The R5F cores can place ATCM & BTCM anywhere in its address
- * based on the corresponding Region Registers in the System
- * Control coprocessor. For now, place ATCM and BTCM at
- * addresses 0 and 0x41010000 (same as the bus address on AM65x
- * SoCs) based on loczrama setting
- */
- if (!strcmp(mem_names[i], "atcm")) {
- core->mem[i].dev_addr = core->loczrama ?
- 0 : K3_R5_TCM_DEV_ADDR;
- } else {
- core->mem[i].dev_addr = core->loczrama ?
- K3_R5_TCM_DEV_ADDR : 0;
- }
- core->mem[i].size = resource_size(res);
-
- dev_dbg(dev, "memory %5s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
- mem_names[i], &core->mem[i].bus_addr,
- core->mem[i].size, core->mem[i].cpu_addr,
- core->mem[i].dev_addr);
- }
- core->num_mems = num_mems;
-
- return 0;
-}
-
-static int k3_r5_core_of_get_sram_memories(struct platform_device *pdev,
- struct k3_r5_core *core)
-{
- struct device_node *np = pdev->dev.of_node;
- struct device *dev = &pdev->dev;
- struct device_node *sram_np;
- struct resource res;
- int num_sram;
- int i, ret;
-
- num_sram = of_property_count_elems_of_size(np, "sram", sizeof(phandle));
- if (num_sram <= 0) {
- dev_dbg(dev, "device does not use reserved on-chip memories, num_sram = %d\n",
- num_sram);
- return 0;
- }
-
- core->sram = devm_kcalloc(dev, num_sram, sizeof(*core->sram), GFP_KERNEL);
- if (!core->sram)
- return -ENOMEM;
-
- for (i = 0; i < num_sram; i++) {
- sram_np = of_parse_phandle(np, "sram", i);
- if (!sram_np)
- return -EINVAL;
-
- if (!of_device_is_available(sram_np)) {
- of_node_put(sram_np);
- return -EINVAL;
- }
-
- ret = of_address_to_resource(sram_np, 0, &res);
- of_node_put(sram_np);
- if (ret)
- return -EINVAL;
-
- core->sram[i].bus_addr = res.start;
- core->sram[i].dev_addr = res.start;
- core->sram[i].size = resource_size(&res);
- core->sram[i].cpu_addr = devm_ioremap_wc(dev, res.start,
- resource_size(&res));
- if (!core->sram[i].cpu_addr) {
- dev_err(dev, "failed to parse and map sram%d memory at %pad\n",
- i, &res.start);
- return -ENOMEM;
- }
-
- dev_dbg(dev, "memory sram%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
- i, &core->sram[i].bus_addr,
- core->sram[i].size, core->sram[i].cpu_addr,
- core->sram[i].dev_addr);
- }
- core->num_sram = num_sram;
-
- return 0;
-}
-
-static void k3_r5_release_tsp(void *data)
-{
- struct ti_sci_proc *tsp = data;
-
- ti_sci_proc_release(tsp);
-}
-
static int k3_r5_core_of_init(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1539,58 +1256,12 @@ static int k3_r5_core_of_init(struct platform_device *pdev)
goto err;
}
- core->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
- if (IS_ERR(core->ti_sci)) {
- ret = dev_err_probe(dev, PTR_ERR(core->ti_sci), "failed to get ti-sci handle\n");
- core->ti_sci = NULL;
- goto err;
- }
-
- ret = of_property_read_u32(np, "ti,sci-dev-id", &core->ti_sci_id);
- if (ret) {
- dev_err(dev, "missing 'ti,sci-dev-id' property\n");
- goto err;
- }
-
- core->reset = devm_reset_control_get_exclusive(dev, NULL);
- if (IS_ERR_OR_NULL(core->reset)) {
- ret = PTR_ERR_OR_ZERO(core->reset);
- if (!ret)
- ret = -ENODEV;
- dev_err_probe(dev, ret, "failed to get reset handle\n");
- goto err;
- }
-
- core->tsp = ti_sci_proc_of_get_tsp(dev, core->ti_sci);
- if (IS_ERR(core->tsp)) {
- ret = dev_err_probe(dev, PTR_ERR(core->tsp),
- "failed to construct ti-sci proc control\n");
- goto err;
- }
-
- ret = k3_r5_core_of_get_internal_memories(pdev, core);
- if (ret) {
- dev_err(dev, "failed to get internal memories, ret = %d\n",
- ret);
- goto err;
- }
-
ret = k3_r5_core_of_get_sram_memories(pdev, core);
if (ret) {
dev_err(dev, "failed to get sram memories, ret = %d\n", ret);
goto err;
}
- ret = ti_sci_proc_request(core->tsp);
- if (ret < 0) {
- dev_err(dev, "ti_sci_proc_request failed, ret = %d\n", ret);
- goto err;
- }
-
- ret = devm_add_action_or_reset(dev, k3_r5_release_tsp, core->tsp);
- if (ret)
- goto err;
-
platform_set_drvdata(pdev, core);
devres_close_group(dev, k3_r5_core_of_init);
@@ -1652,6 +1323,7 @@ static int k3_r5_cluster_of_init(struct platform_device *pdev)
}
core = platform_get_drvdata(cpdev);
+ core->cluster = cluster;
put_device(&cpdev->dev);
list_add_tail(&core->elem, &cluster->cores);
}
@@ -1749,11 +1421,24 @@ static int k3_r5_probe(struct platform_device *pdev)
return 0;
}
+static const struct k3_rproc_mem_data r5_mems[] = {
+ { .name = "atcm", .dev_addr = 0x0 },
+ { .name = "btcm", .dev_addr = K3_R5_TCM_DEV_ADDR },
+};
+
+static const struct k3_rproc_dev_data r5_data = {
+ .mems = r5_mems,
+ .num_mems = ARRAY_SIZE(r5_mems),
+ .boot_align_addr = 0,
+ .uses_lreset = true,
+};
+
static const struct k3_r5_soc_data am65_j721e_soc_data = {
.tcm_is_double = false,
.tcm_ecc_autoinit = false,
.single_cpu_mode = false,
.is_single_core = false,
+ .core_data = &r5_data,
};
static const struct k3_r5_soc_data j7200_j721s2_soc_data = {
@@ -1761,6 +1446,7 @@ static const struct k3_r5_soc_data j7200_j721s2_soc_data = {
.tcm_ecc_autoinit = true,
.single_cpu_mode = false,
.is_single_core = false,
+ .core_data = &r5_data,
};
static const struct k3_r5_soc_data am64_soc_data = {
@@ -1768,6 +1454,7 @@ static const struct k3_r5_soc_data am64_soc_data = {
.tcm_ecc_autoinit = true,
.single_cpu_mode = true,
.is_single_core = false,
+ .core_data = &r5_data,
};
static const struct k3_r5_soc_data am62_soc_data = {
@@ -1775,6 +1462,7 @@ static const struct k3_r5_soc_data am62_soc_data = {
.tcm_ecc_autoinit = true,
.single_cpu_mode = false,
.is_single_core = true,
+ .core_data = &r5_data,
};
static const struct of_device_id k3_r5_of_match[] = {
diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c b/drivers/remoteproc/xlnx_r5_remoteproc.c
index 5aeedeaf3c41..1af89782e116 100644
--- a/drivers/remoteproc/xlnx_r5_remoteproc.c
+++ b/drivers/remoteproc/xlnx_r5_remoteproc.c
@@ -380,6 +380,18 @@ static int zynqmp_r5_rproc_start(struct rproc *rproc)
dev_dbg(r5_core->dev, "RPU boot addr 0x%llx from %s.", rproc->bootaddr,
bootmem == PM_RPU_BOOTMEM_HIVEC ? "OCM" : "TCM");
+ /* Request node before starting RPU core if new version of API is supported */
+ if (zynqmp_pm_feature(PM_REQUEST_NODE) > 1) {
+ ret = zynqmp_pm_request_node(r5_core->pm_domain_id,
+ ZYNQMP_PM_CAPABILITY_ACCESS, 0,
+ ZYNQMP_PM_REQUEST_ACK_BLOCKING);
+ if (ret < 0) {
+ dev_err(r5_core->dev, "failed to request 0x%x",
+ r5_core->pm_domain_id);
+ return ret;
+ }
+ }
+
ret = zynqmp_pm_request_wake(r5_core->pm_domain_id, 1,
bootmem, ZYNQMP_PM_REQUEST_ACK_NO);
if (ret)
@@ -401,10 +413,30 @@ static int zynqmp_r5_rproc_stop(struct rproc *rproc)
struct zynqmp_r5_core *r5_core = rproc->priv;
int ret;
+ /* Use release node API to stop core if new version of API is supported */
+ if (zynqmp_pm_feature(PM_RELEASE_NODE) > 1) {
+ ret = zynqmp_pm_release_node(r5_core->pm_domain_id);
+ if (ret)
+ dev_err(r5_core->dev, "failed to stop remoteproc RPU %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Check the expected version of the EEMI call before issuing it. This
+ * avoids error or warning prints from firmware that does not support
+ * this call.
+ */
+ if (zynqmp_pm_feature(PM_FORCE_POWERDOWN) != 1) {
+ dev_dbg(r5_core->dev, "EEMI interface %d ver 1 not supported\n",
+ PM_FORCE_POWERDOWN);
+ return -EOPNOTSUPP;
+ }
+
+ /* maintain force power down for backward compatibility */
ret = zynqmp_pm_force_pwrdwn(r5_core->pm_domain_id,
ZYNQMP_PM_REQUEST_ACK_BLOCKING);
if (ret)
- dev_err(r5_core->dev, "failed to stop remoteproc RPU %d\n", ret);
+ dev_err(r5_core->dev, "core force power down failed\n");
return ret;
}
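
Both hunks follow the same feature-negotiation pattern: query the firmware's reported version of an EEMI call, use the newer node-based API when available, and otherwise fall back to (or reject) the legacy call. The pattern in isolation, as a hedged sketch (the zynqmp_pm_*() wrappers are the real EEMI helpers; the surrounding function is illustrative):

static int rpu_stop(u32 pm_domain_id)
{
	/* Prefer the node-based API when firmware reports version > 1. */
	if (zynqmp_pm_feature(PM_RELEASE_NODE) > 1)
		return zynqmp_pm_release_node(pm_domain_id);

	/* Otherwise insist on exactly v1 of the legacy call. */
	if (zynqmp_pm_feature(PM_FORCE_POWERDOWN) != 1)
		return -EOPNOTSUPP;

	return zynqmp_pm_force_pwrdwn(pm_domain_id,
				      ZYNQMP_PM_REQUEST_ACK_BLOCKING);
}
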
diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
index 40d386809d6b..87c944d4b4f3 100644
--- a/drivers/rpmsg/qcom_smd.c
+++ b/drivers/rpmsg/qcom_smd.c
@@ -746,7 +746,7 @@ static int __qcom_smd_send(struct qcom_smd_channel *channel, const void *data,
__le32 hdr[5] = { cpu_to_le32(len), };
int tlen = sizeof(hdr) + len;
unsigned long flags;
- int ret;
+ int ret = 0;
/* Word aligned channels only accept word size aligned data */
if (channel->info_word && len % 4)
@@ -1369,7 +1369,8 @@ static int qcom_smd_parse_edge(struct device *dev,
edge->mbox_chan = mbox_request_channel(&edge->mbox_client, 0);
if (IS_ERR(edge->mbox_chan)) {
if (PTR_ERR(edge->mbox_chan) != -ENODEV) {
- ret = PTR_ERR(edge->mbox_chan);
+ ret = dev_err_probe(dev, PTR_ERR(edge->mbox_chan),
+ "failed to acquire IPC mailbox\n");
goto put_node;
}
@@ -1386,6 +1387,7 @@ static int qcom_smd_parse_edge(struct device *dev,
of_node_put(syscon_np);
if (IS_ERR(edge->ipc_regmap)) {
ret = PTR_ERR(edge->ipc_regmap);
+ dev_err(dev, "failed to get regmap from syscon: %d\n", ret);
goto put_node;
}
@@ -1501,10 +1503,8 @@ struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent,
}
ret = qcom_smd_parse_edge(&edge->dev, node, edge);
- if (ret) {
- dev_err(&edge->dev, "failed to parse smd edge\n");
+ if (ret)
goto unregister_dev;
- }
ret = qcom_smd_create_chrdev(edge);
if (ret) {
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index 207b64c0a2fe..6ee36adcbdba 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -194,38 +194,6 @@ int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst)
EXPORT_SYMBOL(rpmsg_sendto);
/**
- * rpmsg_send_offchannel() - send a message using explicit src/dst addresses
- * @ept: the rpmsg endpoint
- * @src: source address
- * @dst: destination address
- * @data: payload of message
- * @len: length of payload
- *
- * This function sends @data of length @len to the remote @dst address,
- * and uses @src as the source address.
- * The message will be sent to the remote processor which the @ept
- * endpoint belongs to.
- * In case there are no TX buffers available, the function will block until
- * one becomes available, or a timeout of 15 seconds elapses. When the latter
- * happens, -ERESTARTSYS is returned.
- *
- * Can only be called from process context (for now).
- *
- * Return: 0 on success and an appropriate error value on failure.
- */
-int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
- void *data, int len)
-{
- if (WARN_ON(!ept))
- return -EINVAL;
- if (!ept->ops->send_offchannel)
- return -ENXIO;
-
- return ept->ops->send_offchannel(ept, src, dst, data, len);
-}
-EXPORT_SYMBOL(rpmsg_send_offchannel);
-
-/**
* rpmsg_trysend() - send a message across to the remote processor
* @ept: the rpmsg endpoint
* @data: payload of message
@@ -302,37 +270,6 @@ __poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
EXPORT_SYMBOL(rpmsg_poll);
/**
- * rpmsg_trysend_offchannel() - send a message using explicit src/dst addresses
- * @ept: the rpmsg endpoint
- * @src: source address
- * @dst: destination address
- * @data: payload of message
- * @len: length of payload
- *
- * This function sends @data of length @len to the remote @dst address,
- * and uses @src as the source address.
- * The message will be sent to the remote processor which the @ept
- * endpoint belongs to.
- * In case there are no TX buffers available, the function will immediately
- * return -ENOMEM without waiting until one becomes available.
- *
- * Can only be called from process context (for now).
- *
- * Return: 0 on success and an appropriate error value on failure.
- */
-int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
- void *data, int len)
-{
- if (WARN_ON(!ept))
- return -EINVAL;
- if (!ept->ops->trysend_offchannel)
- return -ENXIO;
-
- return ept->ops->trysend_offchannel(ept, src, dst, data, len);
-}
-EXPORT_SYMBOL(rpmsg_trysend_offchannel);
-
-/**
* rpmsg_set_flow_control() - request remote to pause/resume transmission
* @ept: the rpmsg endpoint
* @pause: pause transmission
diff --git a/drivers/rpmsg/rpmsg_internal.h b/drivers/rpmsg/rpmsg_internal.h
index 42c7007be1b5..397e4926bd02 100644
--- a/drivers/rpmsg/rpmsg_internal.h
+++ b/drivers/rpmsg/rpmsg_internal.h
@@ -50,10 +50,8 @@ struct rpmsg_device_ops {
* @destroy_ept: see @rpmsg_destroy_ept(), required
* @send: see @rpmsg_send(), required
* @sendto: see @rpmsg_sendto(), optional
- * @send_offchannel: see @rpmsg_send_offchannel(), optional
* @trysend: see @rpmsg_trysend(), required
* @trysendto: see @rpmsg_trysendto(), optional
- * @trysend_offchannel: see @rpmsg_trysend_offchannel(), optional
* @poll: see @rpmsg_poll(), optional
* @set_flow_control: see @rpmsg_set_flow_control(), optional
* @get_mtu: see @rpmsg_get_mtu(), optional
@@ -67,13 +65,9 @@ struct rpmsg_endpoint_ops {
int (*send)(struct rpmsg_endpoint *ept, void *data, int len);
int (*sendto)(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
- int (*send_offchannel)(struct rpmsg_endpoint *ept, u32 src, u32 dst,
- void *data, int len);
int (*trysend)(struct rpmsg_endpoint *ept, void *data, int len);
int (*trysendto)(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
- int (*trysend_offchannel)(struct rpmsg_endpoint *ept, u32 src, u32 dst,
- void *data, int len);
__poll_t (*poll)(struct rpmsg_endpoint *ept, struct file *filp,
poll_table *wait);
int (*set_flow_control)(struct rpmsg_endpoint *ept, bool pause, u32 dst);
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 89d7a3b8c48b..4730b1c8b322 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -141,13 +141,9 @@ static void virtio_rpmsg_destroy_ept(struct rpmsg_endpoint *ept);
static int virtio_rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len);
static int virtio_rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len,
u32 dst);
-static int virtio_rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src,
- u32 dst, void *data, int len);
static int virtio_rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len);
static int virtio_rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data,
int len, u32 dst);
-static int virtio_rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src,
- u32 dst, void *data, int len);
static ssize_t virtio_rpmsg_get_mtu(struct rpmsg_endpoint *ept);
static struct rpmsg_device *__rpmsg_create_channel(struct virtproc_info *vrp,
struct rpmsg_channel_info *chinfo);
@@ -156,10 +152,8 @@ static const struct rpmsg_endpoint_ops virtio_endpoint_ops = {
.destroy_ept = virtio_rpmsg_destroy_ept,
.send = virtio_rpmsg_send,
.sendto = virtio_rpmsg_sendto,
- .send_offchannel = virtio_rpmsg_send_offchannel,
.trysend = virtio_rpmsg_trysend,
.trysendto = virtio_rpmsg_trysendto,
- .trysend_offchannel = virtio_rpmsg_trysend_offchannel,
.get_mtu = virtio_rpmsg_get_mtu,
};
@@ -545,7 +539,7 @@ static void rpmsg_downref_sleepers(struct virtproc_info *vrp)
* the function will immediately fail, and -ENOMEM will be returned.
*
* Normally drivers shouldn't use this function directly; instead, drivers
- * should use the appropriate rpmsg_{try}send{to, _offchannel} API
+ * should use the appropriate rpmsg_{try}send{to} API
* (see include/linux/rpmsg.h).
*
* Return: 0 on success and an appropriate error value on failure.
@@ -665,14 +659,6 @@ static int virtio_rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len,
return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, true);
}
-static int virtio_rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src,
- u32 dst, void *data, int len)
-{
- struct rpmsg_device *rpdev = ept->rpdev;
-
- return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, true);
-}
-
static int virtio_rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len)
{
struct rpmsg_device *rpdev = ept->rpdev;
@@ -690,14 +676,6 @@ static int virtio_rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data,
return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, false);
}
-static int virtio_rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src,
- u32 dst, void *data, int len)
-{
- struct rpmsg_device *rpdev = ept->rpdev;
-
- return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, false);
-}
-
static ssize_t virtio_rpmsg_get_mtu(struct rpmsg_endpoint *ept)
{
struct rpmsg_device *rpdev = ept->rpdev;
diff --git a/drivers/scsi/bnx2fc/Kconfig b/drivers/scsi/bnx2fc/Kconfig
index ecdc0f0f4f4e..3cf7e08df809 100644
--- a/drivers/scsi/bnx2fc/Kconfig
+++ b/drivers/scsi/bnx2fc/Kconfig
@@ -5,7 +5,6 @@ config SCSI_BNX2X_FCOE
depends on (IPV6 || IPV6=n)
depends on LIBFC
depends on LIBFCOE
- depends on MMU
select NETDEVICES
select ETHERNET
select NET_VENDOR_BROADCOM
diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig
index 0cc06c2ce0b8..75ace2302fed 100644
--- a/drivers/scsi/bnx2i/Kconfig
+++ b/drivers/scsi/bnx2i/Kconfig
@@ -4,7 +4,6 @@ config SCSI_BNX2_ISCSI
depends on NET
depends on PCI
depends on (IPV6 || IPV6=n)
- depends on MMU
select SCSI_ISCSI_ATTRS
select NETDEVICES
select ETHERNET
diff --git a/drivers/spi/spi-qpic-snand.c b/drivers/spi/spi-qpic-snand.c
index fd129650434f..3f747fd61d19 100644
--- a/drivers/spi/spi-qpic-snand.c
+++ b/drivers/spi/spi-qpic-snand.c
@@ -1616,6 +1616,7 @@ static void qcom_spi_remove(struct platform_device *pdev)
static const struct qcom_nandc_props ipq9574_snandc_props = {
.dev_cmd_reg_start = 0x7000,
+ .bam_offset = 0x30000,
.supports_bam = true,
};
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index 69c1df0f4ca5..aac67a4413ce 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -243,6 +243,9 @@ hv_uio_probe(struct hv_device *dev,
if (!ring_size)
ring_size = SZ_2M;
+ /* Adjust ring size if necessary to have it page aligned */
+ ring_size = VMBUS_RING_SIZE(ring_size);
+
pdata = devm_kzalloc(&dev->device, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
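
VMBUS_RING_SIZE() accounts for the ring-buffer header and rounds the result up to a whole number of pages, so the mappings exposed to userspace stay page-aligned. Roughly, assuming the macro's current definition in include/linux/hyperv.h:

/* Illustrative expansion; see include/linux/hyperv.h for the real macro. */
ring_size = PAGE_ALIGN(sizeof(struct hv_ring_buffer) + ring_size);
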
@@ -274,13 +277,13 @@ hv_uio_probe(struct hv_device *dev,
pdata->info.mem[INT_PAGE_MAP].name = "int_page";
pdata->info.mem[INT_PAGE_MAP].addr
= (uintptr_t)vmbus_connection.int_page;
- pdata->info.mem[INT_PAGE_MAP].size = PAGE_SIZE;
+ pdata->info.mem[INT_PAGE_MAP].size = HV_HYP_PAGE_SIZE;
pdata->info.mem[INT_PAGE_MAP].memtype = UIO_MEM_LOGICAL;
pdata->info.mem[MON_PAGE_MAP].name = "monitor_page";
pdata->info.mem[MON_PAGE_MAP].addr
= (uintptr_t)vmbus_connection.monitor_pages[1];
- pdata->info.mem[MON_PAGE_MAP].size = PAGE_SIZE;
+ pdata->info.mem[MON_PAGE_MAP].size = HV_HYP_PAGE_SIZE;
pdata->info.mem[MON_PAGE_MAP].memtype = UIO_MEM_LOGICAL;
if (channel->device_id == HV_NIC) {
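
The switch from PAGE_SIZE to HV_HYP_PAGE_SIZE matters on guests whose page size differs from the hypervisor's: the interrupt and monitor pages are sized by Hyper-V's fixed page, not the guest's (think 64 KiB pages on arm64). A compile-time statement of that assumption, provided HV_HYP_PAGE_SIZE keeps its current 4 KiB definition:

/* The hypervisor page size is fixed, independent of guest PAGE_SIZE. */
static_assert(HV_HYP_PAGE_SIZE == SZ_4K);
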
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 740311c4fa24..c7a05f842745 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -144,8 +144,8 @@ static struct hid_descriptor hidg_desc = {
.bcdHID = cpu_to_le16(0x0101),
.bCountryCode = 0x00,
.bNumDescriptors = 0x1,
- /*.desc[0].bDescriptorType = DYNAMIC */
- /*.desc[0].wDescriptorLenght = DYNAMIC */
+ /*.rpt_desc.bDescriptorType = DYNAMIC */
+ /*.rpt_desc.wDescriptorLength = DYNAMIC */
};
/* Super-Speed Support */
@@ -939,8 +939,8 @@ static int hidg_setup(struct usb_function *f,
struct hid_descriptor hidg_desc_copy = hidg_desc;
VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: HID\n");
- hidg_desc_copy.desc[0].bDescriptorType = HID_DT_REPORT;
- hidg_desc_copy.desc[0].wDescriptorLength =
+ hidg_desc_copy.rpt_desc.bDescriptorType = HID_DT_REPORT;
+ hidg_desc_copy.rpt_desc.wDescriptorLength =
cpu_to_le16(hidg->report_desc_length);
length = min_t(unsigned short, length,
@@ -1210,8 +1210,8 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
* We can use the hidg_desc struct here but we should not rely on
* its content staying unchanged after this function returns.
*/
- hidg_desc.desc[0].bDescriptorType = HID_DT_REPORT;
- hidg_desc.desc[0].wDescriptorLength =
+ hidg_desc.rpt_desc.bDescriptorType = HID_DT_REPORT;
+ hidg_desc.rpt_desc.wDescriptorLength =
cpu_to_le16(hidg->report_desc_length);
hidg_hs_in_ep_desc.bEndpointAddress =
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 8adf6f954633..5ea884ef36af 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -7166,7 +7166,7 @@ static void tcpm_fw_get_timings(struct tcpm_port *port, struct fwnode_handle *fw
static int tcpm_fw_get_caps(struct tcpm_port *port, struct fwnode_handle *fwnode)
{
- struct fwnode_handle *capabilities, *child, *caps = NULL;
+ struct fwnode_handle *capabilities, *caps = NULL;
unsigned int nr_src_pdo, nr_snk_pdo;
const char *opmode_str;
u32 *src_pdo, *snk_pdo;
@@ -7232,9 +7232,7 @@ static int tcpm_fw_get_caps(struct tcpm_port *port, struct fwnode_handle *fwnode
if (!capabilities) {
port->pd_count = 1;
} else {
- fwnode_for_each_child_node(capabilities, child)
- port->pd_count++;
-
+ port->pd_count = fwnode_get_child_node_count(capabilities);
if (!port->pd_count) {
ret = -ENODATA;
goto put_capabilities;
diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
index c3bcb6911c53..2b0172f54665 100644
--- a/drivers/vfio/pci/Kconfig
+++ b/drivers/vfio/pci/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
menu "VFIO support for PCI devices"
- depends on PCI && MMU
+ depends on PCI
config VFIO_PCI_CORE
tristate
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index f699e5827ccb..9dc93c5e480b 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -15,7 +15,6 @@
#include <linux/notifier.h>
#include <linux/ctype.h>
#include <linux/err.h>
-#include <linux/fb.h>
#include <linux/slab.h>
#ifdef CONFIG_PMAC_BACKLIGHT
@@ -57,10 +56,10 @@
* a hot-key to adjust backlight, the driver must notify the backlight
* core that brightness has changed using backlight_force_update().
*
- * The backlight driver core receives notifications from fbdev and
- * if the event is FB_EVENT_BLANK and if the value of blank, from the
- * FBIOBLANK ioctrl, results in a change in the backlight state the
- * update_status() operation is called.
+ * Display drivers can control the backlight device's status using
+ * backlight_notify_blank() and backlight_notify_blank_all(). If this
+ * results in a change in the backlight state, the functions call the
+ * update_status() operation.
*/
static struct list_head backlight_dev_list;
@@ -78,85 +77,40 @@ static const char *const backlight_scale_types[] = {
[BACKLIGHT_SCALE_NON_LINEAR] = "non-linear",
};
-#if defined(CONFIG_FB_CORE) || (defined(CONFIG_FB_CORE_MODULE) && \
- defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE))
-/*
- * fb_notifier_callback
- *
- * This callback gets called when something important happens inside a
- * framebuffer driver. The backlight core only cares about FB_BLANK_UNBLANK
- * which is reported to the driver using backlight_update_status()
- * as a state change.
- *
- * There may be several fbdev's connected to the backlight device,
- * in which case they are kept track of. A state change is only reported
- * if there is a change in backlight for the specified fbdev.
- */
-static int fb_notifier_callback(struct notifier_block *self,
- unsigned long event, void *data)
+void backlight_notify_blank(struct backlight_device *bd, struct device *display_dev,
+ bool fb_on, bool prev_fb_on)
{
- struct backlight_device *bd;
- struct fb_event *evdata = data;
- struct fb_info *info = evdata->info;
- struct backlight_device *fb_bd = fb_bl_device(info);
- int node = info->node;
- int fb_blank = 0;
-
- /* If we aren't interested in this event, skip it immediately ... */
- if (event != FB_EVENT_BLANK)
- return 0;
-
- bd = container_of(self, struct backlight_device, fb_notif);
- mutex_lock(&bd->ops_lock);
+ guard(mutex)(&bd->ops_lock);
if (!bd->ops)
- goto out;
- if (bd->ops->controls_device && !bd->ops->controls_device(bd, info->device))
- goto out;
- if (fb_bd && fb_bd != bd)
- goto out;
-
- fb_blank = *(int *)evdata->data;
- if (fb_blank == FB_BLANK_UNBLANK && !bd->fb_bl_on[node]) {
- bd->fb_bl_on[node] = true;
+ return;
+ if (bd->ops->controls_device && !bd->ops->controls_device(bd, display_dev))
+ return;
+
+ if (fb_on && (!prev_fb_on || !bd->use_count)) {
if (!bd->use_count++) {
bd->props.state &= ~BL_CORE_FBBLANK;
backlight_update_status(bd);
}
- } else if (fb_blank != FB_BLANK_UNBLANK && bd->fb_bl_on[node]) {
- bd->fb_bl_on[node] = false;
+ } else if (!fb_on && prev_fb_on && bd->use_count) {
if (!(--bd->use_count)) {
bd->props.state |= BL_CORE_FBBLANK;
backlight_update_status(bd);
}
}
-out:
- mutex_unlock(&bd->ops_lock);
- return 0;
}
+EXPORT_SYMBOL(backlight_notify_blank);
-static int backlight_register_fb(struct backlight_device *bd)
+void backlight_notify_blank_all(struct device *display_dev, bool fb_on, bool prev_fb_on)
{
- memset(&bd->fb_notif, 0, sizeof(bd->fb_notif));
- bd->fb_notif.notifier_call = fb_notifier_callback;
+ struct backlight_device *bd;
- return fb_register_client(&bd->fb_notif);
-}
+ guard(mutex)(&backlight_dev_list_mutex);
-static void backlight_unregister_fb(struct backlight_device *bd)
-{
- fb_unregister_client(&bd->fb_notif);
-}
-#else
-static inline int backlight_register_fb(struct backlight_device *bd)
-{
- return 0;
+ list_for_each_entry(bd, &backlight_dev_list, entry)
+ backlight_notify_blank(bd, display_dev, fb_on, prev_fb_on);
}
-
-static inline void backlight_unregister_fb(struct backlight_device *bd)
-{
-}
-#endif /* CONFIG_FB_CORE */
+EXPORT_SYMBOL(backlight_notify_blank_all);
static void backlight_generate_event(struct backlight_device *bd,
enum backlight_update_reason reason)
@@ -447,12 +401,6 @@ struct backlight_device *backlight_device_register(const char *name,
return ERR_PTR(rc);
}
- rc = backlight_register_fb(new_bd);
- if (rc) {
- device_unregister(&new_bd->dev);
- return ERR_PTR(rc);
- }
-
new_bd->ops = ops;
#ifdef CONFIG_PMAC_BACKLIGHT
@@ -539,7 +487,6 @@ void backlight_device_unregister(struct backlight_device *bd)
bd->ops = NULL;
mutex_unlock(&bd->ops_lock);
- backlight_unregister_fb(bd);
device_unregister(&bd->dev);
}
EXPORT_SYMBOL(backlight_device_unregister);
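
With the fbdev notifier gone, a display driver reports blank transitions to the backlight core directly. A hypothetical caller, to show the intended shape of the new API (struct mydisp and its fields are illustrative):

/* Hypothetical display driver reporting a power transition. */
static void mydisp_set_power(struct mydisp *disp, bool on)
{
	bool prev_on = disp->powered_on;

	disp->powered_on = on;
	/* ... program the panel hardware ... */

	/* Let every matching backlight device track the new state. */
	backlight_notify_blank_all(disp->dev, on, prev_on);
}
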
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
index 3267acf8dc5b..affe5c52471a 100644
--- a/drivers/video/backlight/lcd.c
+++ b/drivers/video/backlight/lcd.c
@@ -15,86 +15,59 @@
#include <linux/notifier.h>
#include <linux/ctype.h>
#include <linux/err.h>
-#include <linux/fb.h>
#include <linux/slab.h>
-#if defined(CONFIG_FB) || (defined(CONFIG_FB_MODULE) && \
- defined(CONFIG_LCD_CLASS_DEVICE_MODULE))
-static int to_lcd_power(int fb_blank)
-{
- switch (fb_blank) {
- case FB_BLANK_UNBLANK:
- return LCD_POWER_ON;
- /* deprecated; TODO: should become 'off' */
- case FB_BLANK_NORMAL:
- return LCD_POWER_REDUCED;
- case FB_BLANK_VSYNC_SUSPEND:
- return LCD_POWER_REDUCED_VSYNC_SUSPEND;
- /* 'off' */
- case FB_BLANK_HSYNC_SUSPEND:
- case FB_BLANK_POWERDOWN:
- default:
- return LCD_POWER_OFF;
- }
-}
+static DEFINE_MUTEX(lcd_dev_list_mutex);
+static LIST_HEAD(lcd_dev_list);
-/* This callback gets called when something important happens inside a
- * framebuffer driver. We're looking if that important event is blanking,
- * and if it is, we're switching lcd power as well ...
- */
-static int fb_notifier_callback(struct notifier_block *self,
- unsigned long event, void *data)
+static void lcd_notify_blank(struct lcd_device *ld, struct device *display_dev,
+ int power)
{
- struct lcd_device *ld = container_of(self, struct lcd_device, fb_notif);
- struct fb_event *evdata = data;
- struct fb_info *info = evdata->info;
- struct lcd_device *fb_lcd = fb_lcd_device(info);
-
guard(mutex)(&ld->ops_lock);
- if (!ld->ops)
- return 0;
- if (ld->ops->controls_device && !ld->ops->controls_device(ld, info->device))
- return 0;
- if (fb_lcd && fb_lcd != ld)
- return 0;
+ if (!ld->ops || !ld->ops->set_power)
+ return;
+ if (ld->ops->controls_device && !ld->ops->controls_device(ld, display_dev))
+ return;
- if (event == FB_EVENT_BLANK) {
- int power = to_lcd_power(*(int *)evdata->data);
+ ld->ops->set_power(ld, power);
+}
- if (ld->ops->set_power)
- ld->ops->set_power(ld, power);
- } else {
- const struct fb_videomode *videomode = evdata->data;
+void lcd_notify_blank_all(struct device *display_dev, int power)
+{
+ struct lcd_device *ld;
- if (ld->ops->set_mode)
- ld->ops->set_mode(ld, videomode->xres, videomode->yres);
- }
+ guard(mutex)(&lcd_dev_list_mutex);
- return 0;
+ list_for_each_entry(ld, &lcd_dev_list, entry)
+ lcd_notify_blank(ld, display_dev, power);
}
+EXPORT_SYMBOL(lcd_notify_blank_all);
-static int lcd_register_fb(struct lcd_device *ld)
+static void lcd_notify_mode_change(struct lcd_device *ld, struct device *display_dev,
+ unsigned int width, unsigned int height)
{
- memset(&ld->fb_notif, 0, sizeof(ld->fb_notif));
- ld->fb_notif.notifier_call = fb_notifier_callback;
- return fb_register_client(&ld->fb_notif);
-}
+ guard(mutex)(&ld->ops_lock);
-static void lcd_unregister_fb(struct lcd_device *ld)
-{
- fb_unregister_client(&ld->fb_notif);
-}
-#else
-static int lcd_register_fb(struct lcd_device *ld)
-{
- return 0;
+ if (!ld->ops || !ld->ops->set_mode)
+ return;
+ if (ld->ops->controls_device && !ld->ops->controls_device(ld, display_dev))
+ return;
+
+ ld->ops->set_mode(ld, width, height);
}
-static inline void lcd_unregister_fb(struct lcd_device *ld)
+void lcd_notify_mode_change_all(struct device *display_dev,
+ unsigned int width, unsigned int height)
{
+ struct lcd_device *ld;
+
+ guard(mutex)(&lcd_dev_list_mutex);
+
+ list_for_each_entry(ld, &lcd_dev_list, entry)
+ lcd_notify_mode_change(ld, display_dev, width, height);
}
-#endif /* CONFIG_FB */
+EXPORT_SYMBOL(lcd_notify_mode_change_all);
static ssize_t lcd_power_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -245,11 +218,8 @@ struct lcd_device *lcd_device_register(const char *name, struct device *parent,
return ERR_PTR(rc);
}
- rc = lcd_register_fb(new_ld);
- if (rc) {
- device_unregister(&new_ld->dev);
- return ERR_PTR(rc);
- }
+ guard(mutex)(&lcd_dev_list_mutex);
+ list_add(&new_ld->entry, &lcd_dev_list);
return new_ld;
}
@@ -266,10 +236,12 @@ void lcd_device_unregister(struct lcd_device *ld)
if (!ld)
return;
+ guard(mutex)(&lcd_dev_list_mutex);
+ list_del(&ld->entry);
+
mutex_lock(&ld->ops_lock);
ld->ops = NULL;
mutex_unlock(&ld->ops_lock);
- lcd_unregister_fb(ld);
device_unregister(&ld->dev);
}
diff --git a/drivers/video/backlight/qcom-wled.c b/drivers/video/backlight/qcom-wled.c
index 9afe701b2a1b..a63bb42c8f8b 100644
--- a/drivers/video/backlight/qcom-wled.c
+++ b/drivers/video/backlight/qcom-wled.c
@@ -1406,9 +1406,11 @@ static int wled_configure(struct wled *wled)
wled->ctrl_addr = be32_to_cpu(*prop_addr);
rc = of_property_read_string(dev->of_node, "label", &wled->name);
- if (rc)
+ if (rc) {
wled->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFn", dev->of_node);
-
+ if (!wled->name)
+ return -ENOMEM;
+ }
switch (wled->version) {
case 3:
u32_opts = wled3_opts;
diff --git a/drivers/video/fbdev/core/fb_backlight.c b/drivers/video/fbdev/core/fb_backlight.c
index 6fdaa9f81be9..dbed9696f4c5 100644
--- a/drivers/video/fbdev/core/fb_backlight.c
+++ b/drivers/video/fbdev/core/fb_backlight.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/backlight.h>
#include <linux/export.h>
#include <linux/fb.h>
#include <linux/mutex.h>
@@ -36,4 +37,15 @@ struct backlight_device *fb_bl_device(struct fb_info *info)
return info->bl_dev;
}
EXPORT_SYMBOL(fb_bl_device);
+
+void fb_bl_notify_blank(struct fb_info *info, int old_blank)
+{
+ bool on = info->blank == FB_BLANK_UNBLANK;
+ bool prev_on = old_blank == FB_BLANK_UNBLANK;
+
+ if (info->bl_dev)
+ backlight_notify_blank(info->bl_dev, info->device, on, prev_on);
+ else
+ backlight_notify_blank_all(info->device, on, prev_on);
+}
#endif
diff --git a/drivers/video/fbdev/core/fb_info.c b/drivers/video/fbdev/core/fb_info.c
index 4847ebe50d7d..52f9bd2c5417 100644
--- a/drivers/video/fbdev/core/fb_info.c
+++ b/drivers/video/fbdev/core/fb_info.c
@@ -42,6 +42,7 @@ struct fb_info *framebuffer_alloc(size_t size, struct device *dev)
info->device = dev;
info->fbcon_rotate_hint = -1;
+ info->blank = FB_BLANK_UNBLANK;
#if IS_ENABLED(CONFIG_FB_BACKLIGHT)
mutex_init(&info->bl_curve_mutex);
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index eca2498f2436..dfcf5e4d1d4c 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -15,6 +15,8 @@
#include <linux/export.h>
#include <linux/fb.h>
#include <linux/fbcon.h>
+#include <linux/lcd.h>
+#include <linux/leds.h>
#include <video/nomodeset.h>
@@ -220,6 +222,12 @@ static int fb_check_caps(struct fb_info *info, struct fb_var_screeninfo *var,
return err;
}
+static void fb_lcd_notify_mode_change(struct fb_info *info,
+ struct fb_videomode *mode)
+{
+ lcd_notify_mode_change_all(info->device, mode->xres, mode->yres);
+}
+
int
fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
{
@@ -227,7 +235,6 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
u32 activate;
struct fb_var_screeninfo old_var;
struct fb_videomode mode;
- struct fb_event event;
u32 unused;
if (var->activate & FB_ACTIVATE_INV_MODE) {
@@ -333,32 +340,71 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
return ret;
}
- event.info = info;
- event.data = &mode;
- fb_notifier_call_chain(FB_EVENT_MODE_CHANGE, &event);
+ fb_lcd_notify_mode_change(info, &mode);
return 0;
}
EXPORT_SYMBOL(fb_set_var);
-int
-fb_blank(struct fb_info *info, int blank)
+static void fb_lcd_notify_blank(struct fb_info *info)
{
- struct fb_event event;
- int ret = -EINVAL;
+ int power;
+
+ switch (info->blank) {
+ case FB_BLANK_UNBLANK:
+ power = LCD_POWER_ON;
+ break;
+ /* deprecated; TODO: should become 'off' */
+ case FB_BLANK_NORMAL:
+ power = LCD_POWER_REDUCED;
+ break;
+ case FB_BLANK_VSYNC_SUSPEND:
+ power = LCD_POWER_REDUCED_VSYNC_SUSPEND;
+ break;
+ /* 'off' */
+ case FB_BLANK_HSYNC_SUSPEND:
+ case FB_BLANK_POWERDOWN:
+ default:
+ power = LCD_POWER_OFF;
+ break;
+ }
+
+ lcd_notify_blank_all(info->device, power);
+}
+
+static void fb_ledtrig_backlight_notify_blank(struct fb_info *info)
+{
+ if (info->blank == FB_BLANK_UNBLANK)
+ ledtrig_backlight_blank(false);
+ else
+ ledtrig_backlight_blank(true);
+}
+
+int fb_blank(struct fb_info *info, int blank)
+{
+ int old_blank = info->blank;
+ int ret;
+
+ if (!info->fbops->fb_blank)
+ return -EINVAL;
if (blank > FB_BLANK_POWERDOWN)
blank = FB_BLANK_POWERDOWN;
- event.info = info;
- event.data = &blank;
+ info->blank = blank;
- if (info->fbops->fb_blank)
- ret = info->fbops->fb_blank(blank, info);
+ ret = info->fbops->fb_blank(blank, info);
+ if (ret)
+ goto err;
+
+ fb_bl_notify_blank(info, old_blank);
+ fb_lcd_notify_blank(info);
+ fb_ledtrig_backlight_notify_blank(info);
- if (!ret)
- fb_notifier_call_chain(FB_EVENT_BLANK, &event);
+ return 0;
+err:
+ info->blank = old_blank;
return ret;
}
EXPORT_SYMBOL(fb_blank);
@@ -416,6 +462,14 @@ static int do_register_framebuffer(struct fb_info *fb_info)
mutex_init(&fb_info->lock);
mutex_init(&fb_info->mm_lock);
+ /*
+ * With an fb_blank callback present, we assume that the
+ * display is blank, so that fb_blank() enables it on the
+ * first modeset.
+ */
+ if (fb_info->fbops->fb_blank)
+ fb_info->blank = FB_BLANK_POWERDOWN;
+
fb_device_create(fb_info);
if (fb_info->pixmap.addr == NULL) {
diff --git a/drivers/video/fbdev/core/fbsysfs.c b/drivers/video/fbdev/core/fbsysfs.c
index 06d75c767579..b8344c40073b 100644
--- a/drivers/video/fbdev/core/fbsysfs.c
+++ b/drivers/video/fbdev/core/fbsysfs.c
@@ -242,11 +242,11 @@ static ssize_t store_blank(struct device *device,
return count;
}
-static ssize_t show_blank(struct device *device,
- struct device_attribute *attr, char *buf)
+static ssize_t show_blank(struct device *device, struct device_attribute *attr, char *buf)
{
-// struct fb_info *fb_info = dev_get_drvdata(device);
- return 0;
+ struct fb_info *fb_info = dev_get_drvdata(device);
+
+ return sysfs_emit(buf, "%d\n", fb_info->blank);
}
static ssize_t store_console(struct device *device,
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 0d8d37f712e8..0c25b2ed44eb 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -804,6 +804,15 @@ config IMX7ULP_WDT
To compile this driver as a module, choose M here: the
module will be called imx7ulp_wdt.
+config S32G_WDT
+ tristate "S32G Watchdog"
+ depends on ARCH_S32 || COMPILE_TEST
+ select WATCHDOG_CORE
+ help
+ This is the driver for the hardware watchdog on the NXP
+ S32G platforms. If you wish to have watchdog support
+ enabled, say Y, otherwise say N.
+
config DB500_WATCHDOG
tristate "ST-Ericsson DB800 watchdog"
depends on MFD_DB8500_PRCMU
@@ -1001,7 +1010,7 @@ config STM32_WATCHDOG
tristate "STM32 Independent WatchDoG (IWDG) support"
depends on ARCH_STM32 || COMPILE_TEST
select WATCHDOG_CORE
- default y
+ default ARCH_STM32
help
Say Y here to include support for the watchdog timer
in stm32 SoCs.
@@ -1363,6 +1372,17 @@ config INTEL_MID_WATCHDOG
To compile this driver as a module, choose M here.
+config INTEL_OC_WATCHDOG
+ tristate "Intel OC Watchdog"
+ depends on (X86 || COMPILE_TEST) && ACPI && HAS_IOPORT
+ select WATCHDOG_CORE
+ help
+ Hardware driver for Intel Over-Clocking watchdog present in
+ Platform Controller Hub (PCH) chipsets.
+
+ To compile this driver as a module, choose M here: the
+ module will be called intel_oc_wdt.
+
config ITCO_WDT
tristate "Intel TCO Timer/Watchdog"
depends on X86 && PCI
@@ -1869,7 +1889,7 @@ config OCTEON_WDT
config MARVELL_GTI_WDT
tristate "Marvell GTI Watchdog driver"
depends on ARCH_THUNDER || (COMPILE_TEST && 64BIT)
- default y
+ default ARCH_THUNDER
select WATCHDOG_CORE
help
Marvell GTI hardware supports watchdog timer. First timeout
@@ -2035,7 +2055,7 @@ config 8xxx_WDT
config PIKA_WDT
tristate "PIKA FPGA Watchdog"
depends on WARP || (PPC64 && COMPILE_TEST)
- default y
+ default WARP
help
This enables the watchdog in the PIKA FPGA. Currently used on
the Warp platform.
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index c9482904bf87..bbd4d62d2cc3 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -69,6 +69,7 @@ obj-$(CONFIG_TS72XX_WATCHDOG) += ts72xx_wdt.o
obj-$(CONFIG_IMX2_WDT) += imx2_wdt.o
obj-$(CONFIG_IMX_SC_WDT) += imx_sc_wdt.o
obj-$(CONFIG_IMX7ULP_WDT) += imx7ulp_wdt.o
+obj-$(CONFIG_S32G_WDT) += s32g_wdt.o
obj-$(CONFIG_DB500_WATCHDOG) += db8500_wdt.o
obj-$(CONFIG_RETU_WATCHDOG) += retu_wdt.o
obj-$(CONFIG_BCM2835_WDT) += bcm2835_wdt.o
@@ -150,6 +151,7 @@ obj-$(CONFIG_W83977F_WDT) += w83977f_wdt.o
obj-$(CONFIG_MACHZ_WDT) += machzwd.o
obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o
obj-$(CONFIG_INTEL_MID_WATCHDOG) += intel-mid_wdt.o
+obj-$(CONFIG_INTEL_OC_WATCHDOG) += intel_oc_wdt.o
obj-$(CONFIG_INTEL_MEI_WDT) += mei_wdt.o
obj-$(CONFIG_NI903X_WDT) += ni903x_wdt.o
obj-$(CONFIG_NIC7018_WDT) += nic7018_wdt.o
diff --git a/drivers/watchdog/apple_wdt.c b/drivers/watchdog/apple_wdt.c
index 95d9e37df41c..66a158f67a71 100644
--- a/drivers/watchdog/apple_wdt.c
+++ b/drivers/watchdog/apple_wdt.c
@@ -95,9 +95,12 @@ static int apple_wdt_ping(struct watchdog_device *wdd)
static int apple_wdt_set_timeout(struct watchdog_device *wdd, unsigned int s)
{
struct apple_wdt *wdt = to_apple_wdt(wdd);
+ u32 actual;
writel_relaxed(0, wdt->regs + APPLE_WDT_WD1_CUR_TIME);
- writel_relaxed(wdt->clk_rate * s, wdt->regs + APPLE_WDT_WD1_BITE_TIME);
+
+ actual = min(s, wdd->max_hw_heartbeat_ms / 1000);
+ writel_relaxed(wdt->clk_rate * actual, wdt->regs + APPLE_WDT_WD1_BITE_TIME);
wdd->timeout = s;
@@ -177,7 +180,7 @@ static int apple_wdt_probe(struct platform_device *pdev)
wdt->wdd.ops = &apple_wdt_ops;
wdt->wdd.info = &apple_wdt_info;
- wdt->wdd.max_timeout = U32_MAX / wdt->clk_rate;
+ wdt->wdd.max_hw_heartbeat_ms = U32_MAX / wdt->clk_rate * 1000;
wdt->wdd.timeout = APPLE_WDT_TIMEOUT_DEFAULT;
wdt_ctrl = readl_relaxed(wdt->regs + APPLE_WDT_WD1_CTRL);
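
Moving the limit from max_timeout to max_hw_heartbeat_ms lets the watchdog core accept timeouts beyond what the hardware counter can hold: the driver arms the hardware with the clamped value and the core re-pings as needed. A worked example, assuming a 24 MHz watchdog clock:

u32 clk_rate = 24000000;		/* assumed 24 MHz clock */
u32 max_hw_s = U32_MAX / clk_rate;	/* ~178 s hardware ceiling */
unsigned int s = 300;			/* user requests five minutes */
u32 actual = min(s, max_hw_s);		/* hardware armed for ~178 s */

/* wdd->timeout stays 300; the core pings before 'actual' expires. */
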
diff --git a/drivers/watchdog/arm_smc_wdt.c b/drivers/watchdog/arm_smc_wdt.c
index 8f3d0c3a005f..bbba23ace7b8 100644
--- a/drivers/watchdog/arm_smc_wdt.c
+++ b/drivers/watchdog/arm_smc_wdt.c
@@ -46,6 +46,8 @@ static int smcwd_call(struct watchdog_device *wdd, enum smcwd_call call,
return -ENODEV;
if (res->a0 == PSCI_RET_INVALID_PARAMS)
return -EINVAL;
+ if (res->a0 == PSCI_RET_DISABLED)
+ return -ENODATA;
if (res->a0 != PSCI_RET_SUCCESS)
return -EIO;
return 0;
@@ -131,10 +133,19 @@ static int smcwd_probe(struct platform_device *pdev)
wdd->info = &smcwd_info;
/* get_timeleft is optional */
- if (smcwd_call(wdd, SMCWD_GET_TIMELEFT, 0, NULL))
- wdd->ops = &smcwd_ops;
- else
+ err = smcwd_call(wdd, SMCWD_GET_TIMELEFT, 0, NULL);
+ switch (err) {
+ case 0:
+ set_bit(WDOG_HW_RUNNING, &wdd->status);
+ fallthrough;
+ case -ENODATA:
wdd->ops = &smcwd_timeleft_ops;
+ break;
+ default:
+ wdd->ops = &smcwd_ops;
+ break;
+ }
+
wdd->timeout = res.a2;
wdd->max_timeout = res.a2;
wdd->min_timeout = res.a1;
diff --git a/drivers/watchdog/cros_ec_wdt.c b/drivers/watchdog/cros_ec_wdt.c
index 716c23f4388c..9ffe7f505645 100644
--- a/drivers/watchdog/cros_ec_wdt.c
+++ b/drivers/watchdog/cros_ec_wdt.c
@@ -25,26 +25,22 @@ static int cros_ec_wdt_send_cmd(struct cros_ec_device *cros_ec,
union cros_ec_wdt_data *arg)
{
int ret;
- struct {
- struct cros_ec_command msg;
- union cros_ec_wdt_data data;
- } __packed buf = {
- .msg = {
- .version = 0,
- .command = EC_CMD_HANG_DETECT,
- .insize = (arg->req.command == EC_HANG_DETECT_CMD_GET_STATUS) ?
- sizeof(struct ec_response_hang_detect) :
- 0,
- .outsize = sizeof(struct ec_params_hang_detect),
- },
- .data.req = arg->req
- };
-
- ret = cros_ec_cmd_xfer_status(cros_ec, &buf.msg);
+ DEFINE_RAW_FLEX(struct cros_ec_command, msg, data,
+ sizeof(union cros_ec_wdt_data));
+
+ msg->version = 0;
+ msg->command = EC_CMD_HANG_DETECT;
+ msg->insize = (arg->req.command == EC_HANG_DETECT_CMD_GET_STATUS) ?
+ sizeof(struct ec_response_hang_detect) :
+ 0;
+ msg->outsize = sizeof(struct ec_params_hang_detect);
+ *(struct ec_params_hang_detect *)msg->data = arg->req;
+
+ ret = cros_ec_cmd_xfer_status(cros_ec, msg);
if (ret < 0)
return ret;
- arg->resp = buf.data.resp;
+ arg->resp = *(struct ec_response_hang_detect *)msg->data;
return 0;
}
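DEFINE_RAW_FLEX() replaces the hand-rolled packed wrapper with stack storage sized for a flexible-array struct plus a caller-chosen tail. Roughly (and only roughly) it behaves like the sketch below, shown with a stand-in struct; the kernel's real macro adds compile-time checks:

struct hdr {
	unsigned int version, command, outsize, insize;
	unsigned char data[];		/* flexible tail */
};

#define RAW_FLEX_SKETCH(name, count)					\
	union {								\
		unsigned char bytes[sizeof(struct hdr) + (count)];	\
		struct hdr obj;						\
	} name##_u = {0};						\
	struct hdr *name = &name##_u.obj

int main(void)
{
	RAW_FLEX_SKETCH(msg, 16);	/* header plus 16 bytes of data[] */

	msg->command = 1;
	return 0;
}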
diff --git a/drivers/watchdog/da9052_wdt.c b/drivers/watchdog/da9052_wdt.c
index 77039f2f0be5..afb7887c3a1e 100644
--- a/drivers/watchdog/da9052_wdt.c
+++ b/drivers/watchdog/da9052_wdt.c
@@ -30,6 +30,18 @@ struct da9052_wdt_data {
unsigned long jpast;
};
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout,
+ "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static int timeout;
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout,
+ "Watchdog timeout in seconds. (default = "
+	__MODULE_STRING(DA9052_DEF_TIMEOUT) ")");
+
static const struct {
u8 reg_val;
int time; /* Seconds */
@@ -168,10 +180,13 @@ static int da9052_wdt_probe(struct platform_device *pdev)
da9052_wdt = &driver_data->wdt;
da9052_wdt->timeout = DA9052_DEF_TIMEOUT;
+ da9052_wdt->min_hw_heartbeat_ms = DA9052_TWDMIN;
da9052_wdt->info = &da9052_wdt_info;
da9052_wdt->ops = &da9052_wdt_ops;
da9052_wdt->parent = dev;
watchdog_set_drvdata(da9052_wdt, driver_data);
+ watchdog_init_timeout(da9052_wdt, timeout, dev);
+ watchdog_set_nowayout(da9052_wdt, nowayout);
if (da9052->fault_log & DA9052_FAULTLOG_TWDERROR)
da9052_wdt->bootstatus |= WDIOF_CARDRESET;
@@ -180,11 +195,15 @@ static int da9052_wdt_probe(struct platform_device *pdev)
if (da9052->fault_log & DA9052_FAULTLOG_VDDFAULT)
da9052_wdt->bootstatus |= WDIOF_POWERUNDER;
- ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
- DA9052_CONTROLD_TWDSCALE, 0);
- if (ret < 0) {
- dev_err(dev, "Failed to disable watchdog bits, %d\n", ret);
+ ret = da9052_reg_read(da9052, DA9052_CONTROL_D_REG);
+ if (ret < 0)
return ret;
+
+ /* Check if FW enabled the watchdog */
+ if (ret & DA9052_CONTROLD_TWDSCALE) {
+ /* Ensure proper initialization */
+ da9052_wdt_start(da9052_wdt);
+ set_bit(WDOG_HW_RUNNING, &da9052_wdt->status);
}
return devm_watchdog_register_device(dev, &driver_data->wdt);
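The probe now adopts a watchdog the firmware left running instead of silently disabling it. A sketch of the general "adopt an already-running watchdog" pattern used above; the hw_enabled flag stands in for the driver-specific TWDSCALE check:

#include <linux/watchdog.h>

static void adopt_if_running(struct watchdog_device *wdd, bool hw_enabled)
{
	if (!hw_enabled)
		return;

	wdd->ops->start(wdd);			/* re-sync timer to a known state */
	set_bit(WDOG_HW_RUNNING, &wdd->status);	/* core feeds it until userspace opens */
}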
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 7672582fa407..9ab769aa0244 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -58,7 +58,6 @@
#include <linux/platform_device.h> /* For platform_driver framework */
#include <linux/pci.h> /* For pci functions */
#include <linux/ioport.h> /* For io-port access */
-#include <linux/spinlock.h> /* For spin_lock/spin_unlock/... */
#include <linux/uaccess.h> /* For copy_to_user/put_user/... */
#include <linux/io.h> /* For inb/outb/... */
#include <linux/platform_data/itco_wdt.h>
@@ -102,8 +101,6 @@ struct iTCO_wdt_private {
* or memory-mapped PMC register bit 4 (TCO version 3).
*/
unsigned long __iomem *gcs_pmc;
- /* the lock for io operations */
- spinlock_t io_lock;
/* the PCI-device */
struct pci_dev *pci_dev;
/* whether or not the watchdog has been suspended */
@@ -286,13 +283,10 @@ static int iTCO_wdt_start(struct watchdog_device *wd_dev)
struct iTCO_wdt_private *p = watchdog_get_drvdata(wd_dev);
unsigned int val;
- spin_lock(&p->io_lock);
-
iTCO_vendor_pre_start(p->smi_res, wd_dev->timeout);
/* disable chipset's NO_REBOOT bit */
if (p->update_no_reboot_bit(p->no_reboot_priv, false)) {
- spin_unlock(&p->io_lock);
dev_err(wd_dev->parent, "failed to reset NO_REBOOT flag, reboot disabled by hardware/BIOS\n");
return -EIO;
}
@@ -309,7 +303,6 @@ static int iTCO_wdt_start(struct watchdog_device *wd_dev)
val &= 0xf7ff;
outw(val, TCO1_CNT(p));
val = inw(TCO1_CNT(p));
- spin_unlock(&p->io_lock);
if (val & 0x0800)
return -1;
@@ -321,8 +314,6 @@ static int iTCO_wdt_stop(struct watchdog_device *wd_dev)
struct iTCO_wdt_private *p = watchdog_get_drvdata(wd_dev);
unsigned int val;
- spin_lock(&p->io_lock);
-
iTCO_vendor_pre_stop(p->smi_res);
/* Bit 11: TCO Timer Halt -> 1 = The TCO timer is disabled */
@@ -334,8 +325,6 @@ static int iTCO_wdt_stop(struct watchdog_device *wd_dev)
/* Set the NO_REBOOT bit to prevent later reboots, just for sure */
p->update_no_reboot_bit(p->no_reboot_priv, true);
- spin_unlock(&p->io_lock);
-
if ((val & 0x0800) == 0)
return -1;
return 0;
@@ -345,8 +334,6 @@ static int iTCO_wdt_ping(struct watchdog_device *wd_dev)
{
struct iTCO_wdt_private *p = watchdog_get_drvdata(wd_dev);
- spin_lock(&p->io_lock);
-
/* Reload the timer by writing to the TCO Timer Counter register */
if (p->iTCO_version >= 2) {
outw(0x01, TCO_RLD(p));
@@ -358,7 +345,6 @@ static int iTCO_wdt_ping(struct watchdog_device *wd_dev)
outb(0x01, TCO_RLD(p));
}
- spin_unlock(&p->io_lock);
return 0;
}
@@ -385,24 +371,20 @@ static int iTCO_wdt_set_timeout(struct watchdog_device *wd_dev, unsigned int t)
/* Write new heartbeat to watchdog */
if (p->iTCO_version >= 2) {
- spin_lock(&p->io_lock);
val16 = inw(TCOv2_TMR(p));
val16 &= 0xfc00;
val16 |= tmrval;
outw(val16, TCOv2_TMR(p));
val16 = inw(TCOv2_TMR(p));
- spin_unlock(&p->io_lock);
if ((val16 & 0x3ff) != tmrval)
return -EINVAL;
} else if (p->iTCO_version == 1) {
- spin_lock(&p->io_lock);
val8 = inb(TCOv1_TMR(p));
val8 &= 0xc0;
val8 |= (tmrval & 0xff);
outb(val8, TCOv1_TMR(p));
val8 = inb(TCOv1_TMR(p));
- spin_unlock(&p->io_lock);
if ((val8 & 0x3f) != tmrval)
return -EINVAL;
@@ -421,19 +403,15 @@ static unsigned int iTCO_wdt_get_timeleft(struct watchdog_device *wd_dev)
/* read the TCO Timer */
if (p->iTCO_version >= 2) {
- spin_lock(&p->io_lock);
val16 = inw(TCO_RLD(p));
val16 &= 0x3ff;
- spin_unlock(&p->io_lock);
time_left = ticks_to_seconds(p, val16);
} else if (p->iTCO_version == 1) {
- spin_lock(&p->io_lock);
val8 = inb(TCO_RLD(p));
val8 &= 0x3f;
if (!(inw(TCO1_STS(p)) & 0x0008))
val8 += (inb(TCOv1_TMR(p)) & 0x3f);
- spin_unlock(&p->io_lock);
time_left = ticks_to_seconds(p, val8);
}
@@ -493,8 +471,6 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
if (!p)
return -ENOMEM;
- spin_lock_init(&p->io_lock);
-
p->tco_res = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_IO_TCO);
if (!p->tco_res)
return -ENODEV;
@@ -604,6 +580,7 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
iTCO_wdt_set_timeout(&p->wddev, WATCHDOG_TIMEOUT);
dev_info(dev, "timeout value out of range, using %d\n",
WATCHDOG_TIMEOUT);
+ heartbeat = WATCHDOG_TIMEOUT;
}
watchdog_stop_on_reboot(&p->wddev);
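Dropping io_lock is safe because the watchdog core already serializes every ops callback behind a per-device mutex, so the driver-private spinlock never excluded anything. Sketch of the call path (simplified, not driver code):

/*
 * watchdog core (watchdog_dev.c), simplified:
 *
 *	mutex_lock(&wd_data->lock);	<- one ops call at a time
 *	err = wdd->ops->ping(wdd);	<- iTCO_wdt_ping() runs here
 *	mutex_unlock(&wd_data->lock);
 *
 * With callbacks already mutually exclusive, p->io_lock was redundant.
 */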
diff --git a/drivers/watchdog/intel_oc_wdt.c b/drivers/watchdog/intel_oc_wdt.c
new file mode 100644
index 000000000000..7c0551106981
--- /dev/null
+++ b/drivers/watchdog/intel_oc_wdt.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel OC Watchdog driver
+ *
+ * Copyright (C) 2025, Siemens
+ * Author: Diogo Ivo <diogo.ivo@siemens.com>
+ */
+
+#define DRV_NAME "intel_oc_wdt"
+
+#include <linux/acpi.h>
+#include <linux/bits.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+
+#define INTEL_OC_WDT_TOV GENMASK(9, 0)
+#define INTEL_OC_WDT_MIN_TOV 1
+#define INTEL_OC_WDT_MAX_TOV 1024
+#define INTEL_OC_WDT_DEF_TOV 60
+
+/*
+ * One-time writable lock bit. If set forbids
+ * modification of itself, _TOV and _EN until
+ * next reboot.
+ */
+#define INTEL_OC_WDT_CTL_LCK BIT(12)
+
+#define INTEL_OC_WDT_EN BIT(14)
+#define INTEL_OC_WDT_NO_ICCSURV_STS BIT(24)
+#define INTEL_OC_WDT_ICCSURV_STS BIT(25)
+#define INTEL_OC_WDT_RLD BIT(31)
+
+#define INTEL_OC_WDT_STS_BITS (INTEL_OC_WDT_NO_ICCSURV_STS | \
+ INTEL_OC_WDT_ICCSURV_STS)
+
+#define INTEL_OC_WDT_CTRL_REG(wdt) ((wdt)->ctrl_res->start)
+
+struct intel_oc_wdt {
+ struct watchdog_device wdd;
+ struct resource *ctrl_res;
+ bool locked;
+};
+
+static unsigned int heartbeat;
+module_param(heartbeat, uint, 0);
+MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (default="
+	__MODULE_STRING(INTEL_OC_WDT_DEF_TOV) ")");
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static int intel_oc_wdt_start(struct watchdog_device *wdd)
+{
+ struct intel_oc_wdt *oc_wdt = watchdog_get_drvdata(wdd);
+
+ if (oc_wdt->locked)
+ return 0;
+
+ outl(inl(INTEL_OC_WDT_CTRL_REG(oc_wdt)) | INTEL_OC_WDT_EN,
+ INTEL_OC_WDT_CTRL_REG(oc_wdt));
+
+ return 0;
+}
+
+static int intel_oc_wdt_stop(struct watchdog_device *wdd)
+{
+ struct intel_oc_wdt *oc_wdt = watchdog_get_drvdata(wdd);
+
+ outl(inl(INTEL_OC_WDT_CTRL_REG(oc_wdt)) & ~INTEL_OC_WDT_EN,
+ INTEL_OC_WDT_CTRL_REG(oc_wdt));
+
+ return 0;
+}
+
+static int intel_oc_wdt_ping(struct watchdog_device *wdd)
+{
+ struct intel_oc_wdt *oc_wdt = watchdog_get_drvdata(wdd);
+
+ outl(inl(INTEL_OC_WDT_CTRL_REG(oc_wdt)) | INTEL_OC_WDT_RLD,
+ INTEL_OC_WDT_CTRL_REG(oc_wdt));
+
+ return 0;
+}
+
+static int intel_oc_wdt_set_timeout(struct watchdog_device *wdd,
+ unsigned int t)
+{
+ struct intel_oc_wdt *oc_wdt = watchdog_get_drvdata(wdd);
+
+ outl((inl(INTEL_OC_WDT_CTRL_REG(oc_wdt)) & ~INTEL_OC_WDT_TOV) | (t - 1),
+ INTEL_OC_WDT_CTRL_REG(oc_wdt));
+
+ wdd->timeout = t;
+
+ return 0;
+}
+
+static struct watchdog_info intel_oc_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
+ .identity = DRV_NAME,
+};
+
+static const struct watchdog_ops intel_oc_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = intel_oc_wdt_start,
+ .stop = intel_oc_wdt_stop,
+ .ping = intel_oc_wdt_ping,
+ .set_timeout = intel_oc_wdt_set_timeout,
+};
+
+static int intel_oc_wdt_setup(struct intel_oc_wdt *oc_wdt)
+{
+ struct watchdog_info *info;
+ unsigned long val;
+
+ val = inl(INTEL_OC_WDT_CTRL_REG(oc_wdt));
+
+ if (val & INTEL_OC_WDT_STS_BITS)
+ oc_wdt->wdd.bootstatus |= WDIOF_CARDRESET;
+
+ oc_wdt->locked = !!(val & INTEL_OC_WDT_CTL_LCK);
+
+ if (val & INTEL_OC_WDT_EN) {
+ /*
+ * No need to issue a ping here to "commit" the new timeout
+ * value to hardware as the watchdog core schedules one
+ * immediately when registering the watchdog.
+ */
+ set_bit(WDOG_HW_RUNNING, &oc_wdt->wdd.status);
+
+ if (oc_wdt->locked) {
+			info = &intel_oc_wdt_info;
+ /*
+ * Set nowayout unconditionally as we cannot stop
+ * the watchdog.
+ */
+ nowayout = true;
+ /*
+ * If we are locked read the current timeout value
+ * and inform the core we can't change it.
+ */
+ oc_wdt->wdd.timeout = (val & INTEL_OC_WDT_TOV) + 1;
+ info->options &= ~WDIOF_SETTIMEOUT;
+
+ dev_info(oc_wdt->wdd.parent,
+ "Register access locked, heartbeat fixed at: %u s\n",
+ oc_wdt->wdd.timeout);
+ }
+ } else if (oc_wdt->locked) {
+ /*
+ * In case the watchdog is disabled and locked there
+ * is nothing we can do with it so just fail probing.
+ */
+ return -EACCES;
+ }
+
+ val &= ~INTEL_OC_WDT_TOV;
+ outl(val | (oc_wdt->wdd.timeout - 1), INTEL_OC_WDT_CTRL_REG(oc_wdt));
+
+ return 0;
+}
+
+static int intel_oc_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct intel_oc_wdt *oc_wdt;
+ struct watchdog_device *wdd;
+ int ret;
+
+	oc_wdt = devm_kzalloc(dev, sizeof(*oc_wdt), GFP_KERNEL);
+ if (!oc_wdt)
+ return -ENOMEM;
+
+ oc_wdt->ctrl_res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!oc_wdt->ctrl_res) {
+		dev_err(dev, "missing I/O resource\n");
+ return -ENODEV;
+ }
+
+	if (!devm_request_region(dev, oc_wdt->ctrl_res->start,
+ resource_size(oc_wdt->ctrl_res), pdev->name)) {
+ dev_err(dev, "resource %pR already in use, device disabled\n",
+ oc_wdt->ctrl_res);
+ return -EBUSY;
+ }
+
+ wdd = &oc_wdt->wdd;
+ wdd->min_timeout = INTEL_OC_WDT_MIN_TOV;
+ wdd->max_timeout = INTEL_OC_WDT_MAX_TOV;
+ wdd->timeout = INTEL_OC_WDT_DEF_TOV;
+ wdd->info = &intel_oc_wdt_info;
+ wdd->ops = &intel_oc_wdt_ops;
+ wdd->parent = dev;
+
+ watchdog_init_timeout(wdd, heartbeat, dev);
+
+ ret = intel_oc_wdt_setup(oc_wdt);
+ if (ret)
+ return ret;
+
+ watchdog_set_drvdata(wdd, oc_wdt);
+ watchdog_set_nowayout(wdd, nowayout);
+ watchdog_stop_on_reboot(wdd);
+ watchdog_stop_on_unregister(wdd);
+
+ return devm_watchdog_register_device(dev, wdd);
+}
+
+static const struct acpi_device_id intel_oc_wdt_match[] = {
+ { "INT3F0D" },
+ { "INTC1099" },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, intel_oc_wdt_match);
+
+static struct platform_driver intel_oc_wdt_platform_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .acpi_match_table = intel_oc_wdt_match,
+ },
+ .probe = intel_oc_wdt_probe,
+};
+
+module_platform_driver(intel_oc_wdt_platform_driver);
+
+MODULE_AUTHOR("Diogo Ivo <diogo.ivo@siemens.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel OC Watchdog driver");
diff --git a/drivers/watchdog/lenovo_se30_wdt.c b/drivers/watchdog/lenovo_se30_wdt.c
index 024b842499b3..1c73bb7eeeee 100644
--- a/drivers/watchdog/lenovo_se30_wdt.c
+++ b/drivers/watchdog/lenovo_se30_wdt.c
@@ -271,6 +271,8 @@ static int lenovo_se30_wdt_probe(struct platform_device *pdev)
return -EBUSY;
priv->shm_base_addr = devm_ioremap(dev, base_phys, SHM_WIN_SIZE);
+ if (!priv->shm_base_addr)
+ return -ENOMEM;
priv->wdt_cfg.mod = WDT_MODULE;
priv->wdt_cfg.idx = WDT_CFG_INDEX;
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
index 132699e2f247..b636650b714b 100644
--- a/drivers/watchdog/pcwd_usb.c
+++ b/drivers/watchdog/pcwd_usb.c
@@ -579,7 +579,7 @@ static struct notifier_block usb_pcwd_notifier = {
.notifier_call = usb_pcwd_notify_sys,
};
-/**
+/*
* usb_pcwd_delete
*/
static inline void usb_pcwd_delete(struct usb_pcwd_private *usb_pcwd)
@@ -590,7 +590,7 @@ static inline void usb_pcwd_delete(struct usb_pcwd_private *usb_pcwd)
kfree(usb_pcwd);
}
-/**
+/*
* usb_pcwd_probe
*
* Called by the usb core when a new device is connected that it thinks
@@ -758,7 +758,7 @@ error:
}
-/**
+/*
* usb_pcwd_disconnect
*
* Called by the usb core when the device is removed from the system.
diff --git a/drivers/watchdog/pretimeout_noop.c b/drivers/watchdog/pretimeout_noop.c
index 4799551dd784..74ec02b9ffca 100644
--- a/drivers/watchdog/pretimeout_noop.c
+++ b/drivers/watchdog/pretimeout_noop.c
@@ -11,7 +11,7 @@
/**
* pretimeout_noop - No operation on watchdog pretimeout event
- * @wdd - watchdog_device
+ * @wdd: watchdog_device
*
* This function prints a message about pretimeout to kernel log.
*/
diff --git a/drivers/watchdog/pretimeout_panic.c b/drivers/watchdog/pretimeout_panic.c
index 2cc3c41d2be5..8c3ac674dc45 100644
--- a/drivers/watchdog/pretimeout_panic.c
+++ b/drivers/watchdog/pretimeout_panic.c
@@ -11,7 +11,7 @@
/**
* pretimeout_panic - Panic on watchdog pretimeout event
- * @wdd - watchdog_device
+ * @wdd: watchdog_device
*
* Panic, watchdog has not been fed till pretimeout event.
*/
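Both hunks fix the same kernel-doc nit: parameters must use the '@name: description' form, not '@name - description', or the documentation generator emits warnings. The canonical shape, with a hypothetical function name:

/**
 * my_handler - one-line summary of the function
 * @wdd: watchdog_device the event fired on
 *
 * Free-form description follows after the blank line.
 */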
diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
index 006f9c61aa64..dfaac5995c84 100644
--- a/drivers/watchdog/qcom-wdt.c
+++ b/drivers/watchdog/qcom-wdt.c
@@ -181,6 +181,12 @@ static const struct qcom_wdt_match_data match_data_apcs_tmr = {
.max_tick_count = 0x10000000U,
};
+static const struct qcom_wdt_match_data match_data_ipq5424 = {
+ .offset = reg_offset_data_kpss,
+ .pretimeout = true,
+ .max_tick_count = 0xFFFFFU,
+};
+
static const struct qcom_wdt_match_data match_data_kpss = {
.offset = reg_offset_data_kpss,
.pretimeout = true,
@@ -322,6 +328,7 @@ static const struct dev_pm_ops qcom_wdt_pm_ops = {
};
static const struct of_device_id qcom_wdt_of_table[] = {
+ { .compatible = "qcom,apss-wdt-ipq5424", .data = &match_data_ipq5424 },
{ .compatible = "qcom,kpss-timer", .data = &match_data_apcs_tmr },
{ .compatible = "qcom,scss-timer", .data = &match_data_apcs_tmr },
{ .compatible = "qcom,kpss-wdt", .data = &match_data_kpss },
diff --git a/drivers/watchdog/s32g_wdt.c b/drivers/watchdog/s32g_wdt.c
new file mode 100644
index 000000000000..ad55063060af
--- /dev/null
+++ b/drivers/watchdog/s32g_wdt.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Watchdog driver for S32G SoC
+ *
+ * Copyright 2017-2019, 2021-2025 NXP.
+ *
+ */
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+
+#define DRIVER_NAME "s32g-swt"
+
+#define S32G_SWT_CR(__base) ((__base) + 0x00) /* Control Register offset */
+#define S32G_SWT_CR_SM (BIT(9) | BIT(10)) /* -> Service Mode */
+#define S32G_SWT_CR_STP BIT(2) /* -> Stop Mode Control */
+#define S32G_SWT_CR_FRZ BIT(1) /* -> Debug Mode Control */
+#define S32G_SWT_CR_WEN BIT(0) /* -> Watchdog Enable */
+
+#define S32G_SWT_TO(__base) ((__base) + 0x08) /* Timeout Register offset */
+
+#define S32G_SWT_SR(__base) ((__base) + 0x10) /* Service Register offset */
+#define S32G_WDT_SEQ1 0xA602 /* -> service sequence number 1 */
+#define S32G_WDT_SEQ2 0xB480 /* -> service sequence number 2 */
+
+#define S32G_SWT_CO(__base) ((__base) + 0x14) /* Counter output register */
+
+#define S32G_WDT_DEFAULT_TIMEOUT 30
+
+struct s32g_wdt_device {
+ int rate;
+ void __iomem *base;
+ struct watchdog_device wdog;
+};
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static unsigned int timeout_param = S32G_WDT_DEFAULT_TIMEOUT;
+module_param(timeout_param, uint, 0);
+MODULE_PARM_DESC(timeout_param, "Watchdog timeout in seconds (default="
+ __MODULE_STRING(S32G_WDT_DEFAULT_TIMEOUT) ")");
+
+static bool early_enable;
+module_param(early_enable, bool, 0);
+MODULE_PARM_DESC(early_enable,
+ "Watchdog is started on module insertion (default=false)");
+
+static const struct watchdog_info s32g_wdt_info = {
+ .identity = "s32g watchdog",
+	.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE,
+};
+
+static struct s32g_wdt_device *wdd_to_s32g_wdt(struct watchdog_device *wdd)
+{
+ return container_of(wdd, struct s32g_wdt_device, wdog);
+}
+
+static unsigned int wdog_sec_to_count(struct s32g_wdt_device *wdev, unsigned int timeout)
+{
+ return wdev->rate * timeout;
+}
+
+static int s32g_wdt_ping(struct watchdog_device *wdog)
+{
+ struct s32g_wdt_device *wdev = wdd_to_s32g_wdt(wdog);
+
+ writel(S32G_WDT_SEQ1, S32G_SWT_SR(wdev->base));
+ writel(S32G_WDT_SEQ2, S32G_SWT_SR(wdev->base));
+
+ return 0;
+}
+
+static int s32g_wdt_start(struct watchdog_device *wdog)
+{
+ struct s32g_wdt_device *wdev = wdd_to_s32g_wdt(wdog);
+ unsigned long val;
+
+ val = readl(S32G_SWT_CR(wdev->base));
+
+ val |= S32G_SWT_CR_WEN;
+
+ writel(val, S32G_SWT_CR(wdev->base));
+
+ return 0;
+}
+
+static int s32g_wdt_stop(struct watchdog_device *wdog)
+{
+ struct s32g_wdt_device *wdev = wdd_to_s32g_wdt(wdog);
+ unsigned long val;
+
+ val = readl(S32G_SWT_CR(wdev->base));
+
+ val &= ~S32G_SWT_CR_WEN;
+
+ writel(val, S32G_SWT_CR(wdev->base));
+
+ return 0;
+}
+
+static int s32g_wdt_set_timeout(struct watchdog_device *wdog, unsigned int timeout)
+{
+ struct s32g_wdt_device *wdev = wdd_to_s32g_wdt(wdog);
+
+ writel(wdog_sec_to_count(wdev, timeout), S32G_SWT_TO(wdev->base));
+
+ wdog->timeout = timeout;
+
+ /*
+	 * According to the documentation, the timeout counter is
+	 * loaded when the watchdog is serviced (i.e. pinged) or when
+	 * the counter is enabled. If the watchdog is already running,
+	 * it must either be stopped and restarted to pick up the new
+	 * timeout value, or pinged to reload the counter. Here we
+	 * choose to ping the watchdog, which is harmless if it is
+	 * stopped.
+ */
+ return s32g_wdt_ping(wdog);
+}
+
+static unsigned int s32g_wdt_get_timeleft(struct watchdog_device *wdog)
+{
+ struct s32g_wdt_device *wdev = wdd_to_s32g_wdt(wdog);
+ unsigned long counter;
+ bool is_running;
+
+ /*
+	 * The counter output can only be read while the SWT is
+	 * disabled. Given the latency between the internal counter
+	 * and the counter output update, there can be a very small
+	 * difference, but that is acceptable since the reported
+	 * value has a resolution of one second.
+ */
+ is_running = watchdog_hw_running(wdog);
+
+ if (is_running)
+ s32g_wdt_stop(wdog);
+
+ counter = readl(S32G_SWT_CO(wdev->base));
+
+ if (is_running)
+ s32g_wdt_start(wdog);
+
+ return counter / wdev->rate;
+}
+
+static const struct watchdog_ops s32g_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = s32g_wdt_start,
+ .stop = s32g_wdt_stop,
+ .ping = s32g_wdt_ping,
+ .set_timeout = s32g_wdt_set_timeout,
+ .get_timeleft = s32g_wdt_get_timeleft,
+};
+
+static void s32g_wdt_init(struct s32g_wdt_device *wdev)
+{
+ unsigned long val;
+
+ /* Set the watchdog's Time-Out value */
+ val = wdog_sec_to_count(wdev, wdev->wdog.timeout);
+
+ writel(val, S32G_SWT_TO(wdev->base));
+
+ /*
+	 * Read the control register content. At init time the
+	 * watchdog should not yet be running.
+ */
+ val = readl(S32G_SWT_CR(wdev->base));
+
+ /*
+ * We want to allow the watchdog timer to be stopped when
+	 * the device enters debug mode.
+ */
+ val |= S32G_SWT_CR_FRZ;
+
+ /*
+ * However, when the CPU is in WFI or suspend mode, the
+	 * watchdog must keep running. The documentation refers to
+	 * this as stop mode.
+ */
+ val &= ~S32G_SWT_CR_STP;
+
+ /*
+	 * Use the fixed service sequence to ping the watchdog; this
+	 * corresponds to the 0x00 configuration value of the service
+	 * mode field. It should already be set, as it is the default
+	 * value, but we clear it just in case.
+ */
+ val &= ~S32G_SWT_CR_SM;
+
+ writel(val, S32G_SWT_CR(wdev->base));
+
+ /*
+ * When the 'early_enable' option is set, we start the
+ * watchdog from the kernel.
+ */
+ if (early_enable) {
+ s32g_wdt_start(&wdev->wdog);
+ set_bit(WDOG_HW_RUNNING, &wdev->wdog.status);
+ }
+}
+
+static int s32g_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct clk *clk;
+ struct s32g_wdt_device *wdev;
+ struct watchdog_device *wdog;
+ int ret;
+
+ wdev = devm_kzalloc(dev, sizeof(*wdev), GFP_KERNEL);
+ if (!wdev)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ wdev->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(wdev->base))
+		return dev_err_probe(dev, PTR_ERR(wdev->base), "Cannot map resource\n");
+
+ clk = devm_clk_get_enabled(dev, "counter");
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "Can't get Watchdog clock\n");
+
+ wdev->rate = clk_get_rate(clk);
+ if (!wdev->rate) {
+ dev_err(dev, "Input clock rate is not valid\n");
+ return -EINVAL;
+ }
+
+ wdog = &wdev->wdog;
+ wdog->info = &s32g_wdt_info;
+ wdog->ops = &s32g_wdt_ops;
+
+ /*
+	 * The code converts the timeout into a counter value. If the
+	 * value is less than 0x100, it is clamped by the SWT module,
+	 * so it is safe to specify a zero value as the minimum
+	 * timeout.
+ */
+ wdog->min_timeout = 0;
+
+ /*
+	 * The counter register is 32 bits long, so the maximum
+	 * counter value is UINT_MAX and the timeout in seconds is
+	 * that value divided by the clock rate.
+	 *
+	 * For instance, a rate of 51 MHz leads to a maximum timeout
+	 * of 84 seconds.
+ */
+ wdog->max_timeout = UINT_MAX / wdev->rate;
+
+ /*
+ * The module param and the DT 'timeout-sec' property will
+ * override the default value if they are specified.
+ */
+ ret = watchdog_init_timeout(wdog, timeout_param, dev);
+ if (ret)
+ return ret;
+
+ /*
+ * As soon as the watchdog is started, there is no way to stop
+	 * it if the 'nowayout' option is set at boot time.
+ */
+ watchdog_set_nowayout(wdog, nowayout);
+
+ /*
+ * The devm_ version of the watchdog_register_device()
+ * function will call watchdog_unregister_device() when the
+ * device is removed.
+ */
+ watchdog_stop_on_unregister(wdog);
+
+ s32g_wdt_init(wdev);
+
+ ret = devm_watchdog_register_device(dev, wdog);
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot register watchdog device\n");
+
+ dev_info(dev, "S32G Watchdog Timer Registered, timeout=%ds, nowayout=%d, early_enable=%d\n",
+ wdog->timeout, nowayout, early_enable);
+
+ return 0;
+}
+
+static const struct of_device_id s32g_wdt_dt_ids[] = {
+ { .compatible = "nxp,s32g2-swt" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, s32g_wdt_dt_ids);
+
+static struct platform_driver s32g_wdt_driver = {
+ .probe = s32g_wdt_probe,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = s32g_wdt_dt_ids,
+ },
+};
+
+module_platform_driver(s32g_wdt_driver);
+
+MODULE_AUTHOR("Daniel Lezcano <daniel.lezcano@linaro.org>");
+MODULE_DESCRIPTION("Watchdog driver for S32G SoC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index bdd81d8074b2..40901bdac426 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -82,6 +82,10 @@
#define GS_CLUSTER2_NONCPU_INT_EN 0x1644
#define GS_RST_STAT_REG_OFFSET 0x3B44
+#define EXYNOS990_CLUSTER2_NONCPU_OUT 0x1620
+#define EXYNOS990_CLUSTER2_NONCPU_INT_EN 0x1644
+#define EXYNOS990_CLUSTER2_WDTRESET_BIT 23
+
/**
* DOC: Quirk flags for different Samsung watchdog IP-cores
*
@@ -259,6 +263,32 @@ static const struct s3c2410_wdt_variant drv_data_exynos850_cl1 = {
QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN,
};
+static const struct s3c2410_wdt_variant drv_data_exynos990_cl0 = {
+ .mask_reset_reg = GS_CLUSTER0_NONCPU_INT_EN,
+ .mask_bit = 2,
+ .mask_reset_inv = true,
+ .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
+ .rst_stat_bit = EXYNOS850_CLUSTER0_WDTRESET_BIT,
+ .cnt_en_reg = EXYNOSAUTOV920_CLUSTER0_NONCPU_OUT,
+ .cnt_en_bit = 7,
+ .quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET |
+ QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN |
+ QUIRK_HAS_DBGACK_BIT,
+};
+
+static const struct s3c2410_wdt_variant drv_data_exynos990_cl2 = {
+ .mask_reset_reg = EXYNOS990_CLUSTER2_NONCPU_INT_EN,
+ .mask_bit = 2,
+ .mask_reset_inv = true,
+ .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
+ .rst_stat_bit = EXYNOS990_CLUSTER2_WDTRESET_BIT,
+ .cnt_en_reg = EXYNOS990_CLUSTER2_NONCPU_OUT,
+ .cnt_en_bit = 7,
+ .quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET |
+ QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN |
+ QUIRK_HAS_DBGACK_BIT,
+};
+
static const struct s3c2410_wdt_variant drv_data_exynosautov9_cl0 = {
.mask_reset_reg = EXYNOS850_CLUSTER0_NONCPU_INT_EN,
.mask_bit = 2,
@@ -350,6 +380,8 @@ static const struct of_device_id s3c2410_wdt_match[] = {
.data = &drv_data_exynos7 },
{ .compatible = "samsung,exynos850-wdt",
.data = &drv_data_exynos850_cl0 },
+ { .compatible = "samsung,exynos990-wdt",
+ .data = &drv_data_exynos990_cl0 },
{ .compatible = "samsung,exynosautov9-wdt",
.data = &drv_data_exynosautov9_cl0 },
{ .compatible = "samsung,exynosautov920-wdt",
@@ -678,7 +710,8 @@ s3c2410_get_wdt_drv_data(struct platform_device *pdev, struct s3c2410_wdt *wdt)
if (variant == &drv_data_exynos850_cl0 ||
variant == &drv_data_exynosautov9_cl0 ||
variant == &drv_data_gs101_cl0 ||
- variant == &drv_data_exynosautov920_cl0) {
+ variant == &drv_data_exynosautov920_cl0 ||
+ variant == &drv_data_exynos990_cl0) {
u32 index;
int err;
@@ -700,6 +733,10 @@ s3c2410_get_wdt_drv_data(struct platform_device *pdev, struct s3c2410_wdt *wdt)
else if (variant == &drv_data_exynosautov920_cl0)
variant = &drv_data_exynosautov920_cl1;
break;
+ case 2:
+ if (variant == &drv_data_exynos990_cl0)
+ variant = &drv_data_exynos990_cl2;
+ break;
default:
return dev_err_probe(dev, -EINVAL, "wrong cluster index: %u\n", index);
}
diff --git a/drivers/watchdog/stm32_iwdg.c b/drivers/watchdog/stm32_iwdg.c
index 8ad06b54c5ad..b356a272ff9a 100644
--- a/drivers/watchdog/stm32_iwdg.c
+++ b/drivers/watchdog/stm32_iwdg.c
@@ -291,7 +291,7 @@ static int stm32_iwdg_irq_init(struct platform_device *pdev,
return 0;
if (of_property_read_bool(np, "wakeup-source")) {
- ret = device_init_wakeup(dev, true);
+ ret = devm_device_init_wakeup(dev);
if (ret)
return ret;
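devm_device_init_wakeup() arms wakeup and registers a devres action that disarms it on driver detach, closing the leak the plain call left behind on unbind. Roughly equivalent open-coded form (sketch, simplified; helper names invented):

#include <linux/device.h>
#include <linux/pm_wakeup.h>

static void my_wakeup_disable(void *dev)
{
	device_init_wakeup(dev, false);
}

static int my_init_wakeup(struct device *dev)
{
	int ret = device_init_wakeup(dev, true);

	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, my_wakeup_disable, dev);
}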
diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c
index dc5f29560e9b..3918a600f2a0 100644
--- a/drivers/watchdog/wdt_pci.c
+++ b/drivers/watchdog/wdt_pci.c
@@ -264,7 +264,7 @@ static int wdtpci_get_status(int *status)
return 0;
}
-/**
+/*
* wdtpci_get_temperature:
*
* Reports the temperature in degrees Fahrenheit. The API is in